Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'amd-drm-next-6.8-2023-12-08' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.8-2023-12-08:

amdgpu:
- SR-IOV fixes
- DCN 3.5 updates
- Backlight fixes
- MST fixes
- DMCUB fixes
- DPIA fixes
- Display powergating updates
- Enable writeback connectors
- Misc code cleanups
- Add more register state debugging for aquavanjaram
- Suspend fix
- Clockgating fixes
- SMU 14 updates
- PSR fixes
- MES logging updates
- Misc fixes

amdkfd:
- SVM fix

radeon:
- Fix potential memory leaks in error paths

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231208205613.4861-1-alexander.deucher@amd.com

+2110 -422
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c (+2)
···
 	amdgpu_debugfs_firmware_init(adev);
 	amdgpu_ta_if_debugfs_init(adev);

+	amdgpu_debugfs_mes_event_log_init(adev);
+
 #if defined(CONFIG_DRM_AMD_DC)
 	if (adev->dc_enabled)
 		dtn_debugfs_init(adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (+2)
···
 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c (+61)
···
 	return 0;
 }

+static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    &adev->mes.event_log_gpu_obj,
+				    &adev->mes.event_log_gpu_addr,
+				    &adev->mes.event_log_cpu_addr);
+	if (r) {
+		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
+		return r;
+	}
+
+	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+
+	return 0;
+
+}
+
 static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
 {
 	bitmap_free(adev->mes.doorbell_bitmap);
···
 	if (r)
 		goto error;

+	r = amdgpu_mes_event_log_init(adev);
+	if (r)
+		goto error_doorbell;
+
 	return 0;

+error_doorbell:
+	amdgpu_mes_doorbell_free(adev);
 error:
 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
···

 void amdgpu_mes_fini(struct amdgpu_device *adev)
 {
+	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
+			      &adev->mes.event_log_gpu_addr,
+			      &adev->mes.event_log_cpu_addr);
+
 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
 	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
···
 out:
 	amdgpu_ucode_release(&adev->mes.fw[pipe]);
 	return r;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_device *adev = m->private;
+	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
+
+	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
+		     mem, PAGE_SIZE, false);
+
+	return 0;
+}
+
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
+
+#endif
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
+{
+
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+
+	debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+			    adev, &amdgpu_debugfs_mes_event_log_fops);
+
+#endif
 }
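The debugfs hook relies on the kernel's DEFINE_SHOW_ATTRIBUTE() macro: given the amdgpu_debugfs_mes_event_log_show() routine above, it generates the amdgpu_debugfs_mes_event_log_fops table that debugfs_create_file() registers. A simplified sketch of what the macro expands to (following include/linux/seq_file.h):

/* Sketch of DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log):
 * the *_show routine is wired into a read-only seq_file interface.
 */
static int amdgpu_debugfs_mes_event_log_open(struct inode *inode,
					     struct file *file)
{
	/* inode->i_private is the 'adev' passed to debugfs_create_file() */
	return single_open(file, amdgpu_debugfs_mes_event_log_show,
			   inode->i_private);
}

static const struct file_operations amdgpu_debugfs_mes_event_log_fops = {
	.owner   = THIS_MODULE,
	.open    = amdgpu_debugfs_mes_event_log_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

Once registered, reading /sys/kernel/debug/dri/<minor>/amdgpu_mes_event_log hex-dumps the PAGE_SIZE GTT buffer that the MES firmware fills with its event history.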
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h (+5)
···
 	uint32_t			num_mes_dbs;
 	unsigned long			*doorbell_bitmap;

+	/* MES event log buffer */
+	struct amdgpu_bo		*event_log_gpu_obj;
+	uint64_t			event_log_gpu_addr;
+	void				*event_log_cpu_addr;
+
 	/* ip specific functions */
 	const struct amdgpu_mes_funcs	*funcs;
 };
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h (+4)
···

 	int otg_inst;
 	struct drm_pending_vblank_event *event;
+
+	bool wb_pending;
+	bool wb_enabled;
+	struct drm_writeback_connector *wb_conn;
 };

 struct amdgpu_encoder_atom_dig {
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c (+203)
···
 	return xgmi_reg_state->common_header.structure_size;
 }

+#define smnreg_0x11C00070 0x11C00070
+#define smnreg_0x11C00210 0x11C00210
+
+static struct aqua_reg_list wafl_reg_addrs[] = {
+	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
+	{ smnreg_0x11C00210, 1, 0 },
+};
+
+#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_WAFL_SMN_REGS 5
+
+static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
+					     void *buf, size_t max_size)
+{
+	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
+	uint32_t start_addr, incrx, num_regs, szbuf;
+	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	const int max_wafl_instances = 8;
+	int inst = 0, i, j, r, n;
+	const int wafl_inst = 2;
+	void *p;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
+
+	szbuf = sizeof(*wafl_reg_state) +
+		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
+				    NUM_WAFL_SMN_REGS);
+
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	p = &wafl_reg_state->wafl_state_regs[0];
+	for_each_inst(i, adev->aid_mask) {
+		for (j = 0; j < wafl_inst; ++j) {
+			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
+			wafl_regs->inst_header.instance = inst++;
+
+			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
+			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
+
+			reg_data = wafl_regs->smn_reg_values;
+
+			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
+				start_addr = wafl_reg_addrs[r].start_addr;
+				incrx = wafl_reg_addrs[r].incrx;
+				num_regs = wafl_reg_addrs[r].num_regs;
+				for (n = 0; n < num_regs; n++) {
+					aqua_read_smn_ext(
+						adev, reg_data,
+						WAFL_LINK_REG(start_addr, j) +
+							n * incrx,
+						i);
+					++reg_data;
+				}
+			}
+			p = reg_data;
+		}
+	}
+
+	wafl_reg_state->common_header.structure_size = szbuf;
+	wafl_reg_state->common_header.format_revision = 1;
+	wafl_reg_state->common_header.content_revision = 0;
+	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
+	wafl_reg_state->common_header.num_instances = max_wafl_instances;
+
+	return wafl_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x1B311060 0x1B311060
+#define smnreg_0x1B411060 0x1B411060
+#define smnreg_0x1B511060 0x1B511060
+#define smnreg_0x1B611060 0x1B611060
+
+#define smnreg_0x1C307120 0x1C307120
+#define smnreg_0x1C317120 0x1C317120
+
+#define smnreg_0x1C320830 0x1C320830
+#define smnreg_0x1C380830 0x1C380830
+#define smnreg_0x1C3D0830 0x1C3D0830
+#define smnreg_0x1C420830 0x1C420830
+
+#define smnreg_0x1C320100 0x1C320100
+#define smnreg_0x1C380100 0x1C380100
+#define smnreg_0x1C3D0100 0x1C3D0100
+#define smnreg_0x1C420100 0x1C420100
+
+#define smnreg_0x1B310500 0x1B310500
+#define smnreg_0x1C300400 0x1C300400
+
+#define USR_CAKE_INCR 0x11000
+#define USR_LINK_INCR 0x100000
+#define USR_CP_INCR 0x10000
+
+#define NUM_USR_SMN_REGS 20
+
+struct aqua_reg_list usr_reg_addrs[] = {
+	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
+	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
+	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
+};
+
+#define NUM_USR1_SMN_REGS 46
+struct aqua_reg_list usr1_reg_addrs[] = {
+	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
+	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
+	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
+	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
+	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
+	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
+	{ smnreg_0x1C300400, 2, USR_CP_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
+					    void *buf, size_t max_size,
+					    int reg_state)
+{
+	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
+	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
+	struct amdgpu_regs_usr_v1_0 *usr_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	const int max_usr_instances = 4;
+	struct aqua_reg_list *reg_addrs;
+	int inst = 0, i, n, r, arr_size;
+	void *p;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	switch (reg_state) {
+	case AMDGPU_REG_STATE_TYPE_USR:
+		arr_size = ARRAY_SIZE(usr_reg_addrs);
+		reg_addrs = usr_reg_addrs;
+		num_smn = NUM_USR_SMN_REGS;
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR_1:
+		arr_size = ARRAY_SIZE(usr1_reg_addrs);
+		reg_addrs = usr1_reg_addrs;
+		num_smn = NUM_USR1_SMN_REGS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
+
+	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
+							     sizeof(*usr_regs),
+							     num_smn);
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	p = &usr_reg_state->usr_state_regs[0];
+	for_each_inst(i, adev->aid_mask) {
+		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
+		usr_regs->inst_header.instance = inst++;
+		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
+		usr_regs->inst_header.num_smn_regs = num_smn;
+		reg_data = usr_regs->smn_reg_values;
+
+		for (r = 0; r < arr_size; r++) {
+			start_addr = reg_addrs[r].start_addr;
+			incrx = reg_addrs[r].incrx;
+			num_regs = reg_addrs[r].num_regs;
+			for (n = 0; n < num_regs; n++) {
+				aqua_read_smn_ext(adev, reg_data,
+						  start_addr + n * incrx, i);
+				reg_data++;
+			}
+		}
+		p = reg_data;
+	}
+
+	usr_reg_state->common_header.structure_size = szbuf;
+	usr_reg_state->common_header.format_revision = 1;
+	usr_reg_state->common_header.content_revision = 0;
+	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
+	usr_reg_state->common_header.num_instances = max_usr_instances;
+
+	return usr_reg_state->common_header.structure_size;
+}
+
 ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
 				    enum amdgpu_reg_state reg_state, void *buf,
 				    size_t max_size)
···
 		break;
 	case AMDGPU_REG_STATE_TYPE_XGMI:
 		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
+		break;
+	case AMDGPU_REG_STATE_TYPE_WAFL:
+		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR:
+		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
+						    AMDGPU_REG_STATE_TYPE_USR);
+		break;
+	case AMDGPU_REG_STATE_TYPE_USR_1:
+		size = aqua_vanjaram_read_usr_state(
+			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
 		break;
 	default:
 		return -EINVAL;
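Both new readers size-check the destination buffer before walking any registers. The helper amdgpu_reginst_size() is not part of this diff; judging from its call sites it plausibly computes the per-instance payload as in this sketch (an assumption, not the verbatim helper):

/* Hypothetical reconstruction of amdgpu_reginst_size(): each instance
 * carries its own header (inst_size) plus one amdgpu_smn_reg_data entry
 * per sampled SMN register.
 */
static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
					 uint16_t num_regs)
{
	return num_inst * (inst_size +
			   num_regs * sizeof(struct amdgpu_smn_reg_data));
}

For the WAFL dump that yields 8 instances (up to 4 AIDs x 2 WAFL links, matching max_wafl_instances) of a header plus NUM_WAFL_SMN_REGS = 5 register records: 4 from the DW_ADDR_INCR block plus 1 standalone register in wafl_reg_addrs.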
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c (+2)
···
 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
 	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
 	mes_set_hw_res_pkt.oversubscription_timer = 50;
+	mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+	mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;

 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
 			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c (+29 -19)
···
 	return amdgpu_vcn_early_init(adev);
 }

+static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+	fw_shared->sq.is_enabled = 1;
+
+	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+	fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+		AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+	if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
+	    IP_VERSION(4, 0, 2)) {
+		fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+		fw_shared->drm_key_wa.method =
+			AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+	}
+
+	if (amdgpu_vcnfw_log)
+		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+	return 0;
+}
+
 /**
  * vcn_v4_0_sw_init - sw init for VCN block
  *
···
 		return r;

 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
···
 		if (r)
 			return r;

-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
-		fw_shared->sq.is_enabled = 1;
-
-		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
-		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
-			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
-
-		if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
-		    IP_VERSION(4, 0, 2)) {
-			fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
-			fw_shared->drm_key_wa.method =
-				AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
-		}
-
-		if (amdgpu_vcnfw_log)
-			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+		vcn_v4_0_fw_shared_init(adev, i);
 	}

 	if (amdgpu_sriov_vf(adev)) {
···
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
+
+		// Must re/init fw_shared at beginning
+		vcn_v4_0_fw_shared_init(adev, i);

 		table_size = 0;

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c (+81 -65)
···
 		goto out_free;
 	}
 	if (cpages != npages)
-		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
 			 cpages, npages);
 	else
-		pr_debug("0x%lx pages migrated\n", cpages);
+		pr_debug("0x%lx pages collected\n", cpages);

 	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
 	migrate_vma_pages(&migrate);
···
  * svm_migrate_ram_to_vram - migrate svm range from system to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start_mgr: first page to migrate
+ * @last_mgr: last page to migrate
  * @mm: the process mm structure
  * @trigger: reason of migration
  *
···
  */
 static int
 svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+			unsigned long start_mgr, unsigned long last_mgr,
 			struct mm_struct *mm, uint32_t trigger)
 {
 	unsigned long addr, start, end;
···
 	unsigned long cpages = 0;
 	long r = 0;

-	if (prange->actual_loc == best_loc) {
-		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
-			 prange->svms, prange->start, prange->last, best_loc);
-		return 0;
+	if (start_mgr < prange->start || last_mgr > prange->last) {
+		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+			 start_mgr, last_mgr, prange->start, prange->last);
+		return -EFAULT;
 	}

 	node = svm_range_get_node_by_id(prange, best_loc);
···
 		return -ENODEV;
 	}

-	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
-		 prange->start, prange->last, best_loc);
+	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+		 prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+		 best_loc);

-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = start_mgr << PAGE_SHIFT;
+	end = (last_mgr + 1) << PAGE_SHIFT;

 	r = svm_range_vram_node_new(node, prange, true);
 	if (r) {
 		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
 		return r;
 	}
-	ttm_res_offset = prange->offset << PAGE_SHIFT;
+	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;

 	for (addr = start; addr < end;) {
 		unsigned long next;
···

 	if (cpages) {
 		prange->actual_loc = best_loc;
-		svm_range_dma_unmap(prange);
-	} else {
+		prange->vram_pages = prange->vram_pages + cpages;
+	} else if (!prange->actual_loc) {
+		/* if no pages migrated and all of prange's pages are in
+		 * sys ram, drop the svm_bo taken from svm_range_vram_node_new
+		 */
 		svm_range_vram_node_free(prange);
 	}

···
  * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
  *
  * Return:
- *   0 - success with all pages migrated
  *   negative values - indicate error
- *   positive values - partial migration, number of pages not migrated
+ *   positive values or zero - number of pages migrated
  */
 static long
 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
···
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
 	unsigned long upages = npages;
 	unsigned long cpages = 0;
+	unsigned long mpages = 0;
 	struct amdgpu_device *adev = node->adev;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
···
 		goto out_free;
 	}
 	if (cpages != npages)
-		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
 			 cpages, npages);
 	else
-		pr_debug("0x%lx pages migrated\n", cpages);
+		pr_debug("0x%lx pages collected\n", cpages);

 	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
 				    scratch, npages);
···
 	kvfree(buf);
 out:
 	if (!r && cpages) {
+		mpages = cpages - upages;
 		pdd = svm_range_get_pdd_by_node(prange, node);
 		if (pdd)
-			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
 	}
-	return r ? r : upages;
+
+	return r ? r : mpages;
 }

 /**
  * svm_migrate_vram_to_ram - migrate svm range from device to system
  * @prange: range structure
  * @mm: process mm, use current->mm if NULL
+ * @start_mgr: first page to be migrated to sys ram
+ * @last_mgr: last page to be migrated to sys ram
  * @trigger: reason of migration
  * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
  *
···
  * 0 - OK, otherwise error code
  */
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+			    unsigned long start_mgr, unsigned long last_mgr,
 			    uint32_t trigger, struct page *fault_page)
 {
 	struct kfd_node *node;
···
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	unsigned long upages = 0;
+	unsigned long mpages = 0;
 	long r = 0;

+	/* this prange has no vram pages to migrate to sys ram */
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
 			 prange->start, prange->last);
 		return 0;
+	}
+
+	if (start_mgr < prange->start || last_mgr > prange->last) {
+		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+			 start_mgr, last_mgr, prange->start, prange->last);
+		return -EFAULT;
 	}

 	node = svm_range_get_node_by_id(prange, prange->actual_loc);
···
 		return -ENODEV;
 	}
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
-		 prange->svms, prange, prange->start, prange->last,
+		 prange->svms, prange, start_mgr, last_mgr,
 		 prange->actual_loc);

-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = start_mgr << PAGE_SHIFT;
+	end = (last_mgr + 1) << PAGE_SHIFT;

 	for (addr = start; addr < end;) {
 		unsigned long next;
···
 			pr_debug("failed %ld to migrate prange %p\n", r, prange);
 			break;
 		} else {
-			upages += r;
+			mpages += r;
 		}
 		addr = next;
 	}

-	if (r >= 0 && !upages) {
-		svm_range_vram_node_free(prange);
-		prange->actual_loc = 0;
+	if (r >= 0) {
+		prange->vram_pages -= mpages;
+
+		/* if prange has no vram pages left, set actual_loc to system
+		 * and drop its svm_bo ref
+		 */
+		if (prange->vram_pages == 0 && prange->ttm_res) {
+			prange->actual_loc = 0;
+			svm_range_vram_node_free(prange);
+		}
 	}

 	return r < 0 ? r : 0;
···
  * svm_migrate_vram_to_vram - migrate svm range from device to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start: first page to be migrated
+ * @last: last page to be migrated
  * @mm: process mm, use current->mm if NULL
  * @trigger: reason of migration
  *
  * Context: Process context, caller hold mmap read lock, svms lock, prange lock
+ *
+ * Migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
  *
  * Return:
  * 0 - OK, otherwise error code
  */
 static int
 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
-			 struct mm_struct *mm, uint32_t trigger)
+			 unsigned long start, unsigned long last,
+			 struct mm_struct *mm, uint32_t trigger)
 {
 	int r, retries = 3;

···
 	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

 	do {
-		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+					    trigger, NULL);
 		if (r)
 			return r;
 	} while (prange->actual_loc && --retries);
···
 	if (prange->actual_loc)
 		return -EDEADLK;

-	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
 }

 int
 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+		    unsigned long start, unsigned long last,
 		    struct mm_struct *mm, uint32_t trigger)
 {
-	if (!prange->actual_loc)
-		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+	if (!prange->actual_loc || prange->actual_loc == best_loc)
+		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+					       mm, trigger);
+
 	else
-		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+						mm, trigger);

 }
···
  */
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
+	unsigned long start, last, size;
 	unsigned long addr = vmf->address;
 	struct svm_range_bo *svm_bo;
-	enum svm_work_list_ops op;
-	struct svm_range *parent;
 	struct svm_range *prange;
 	struct kfd_process *p;
 	struct mm_struct *mm;
···

 	mutex_lock(&p->svms.lock);

-	prange = svm_range_from_addr(&p->svms, addr, &parent);
+	prange = svm_range_from_addr(&p->svms, addr, NULL);
 	if (!prange) {
 		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
 		r = -EFAULT;
 		goto out_unlock_svms;
 	}

-	mutex_lock(&parent->migrate_mutex);
-	if (prange != parent)
-		mutex_lock_nested(&prange->migrate_mutex, 1);
+	mutex_lock(&prange->migrate_mutex);

 	if (!prange->actual_loc)
 		goto out_unlock_prange;

-	svm_range_lock(parent);
-	if (prange != parent)
-		mutex_lock_nested(&prange->lock, 1);
-	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
-	if (prange != parent)
-		mutex_unlock(&prange->lock);
-	svm_range_unlock(parent);
-	if (r) {
-		pr_debug("failed %d to split range by granularity\n", r);
-		goto out_unlock_prange;
-	}
+	/* Align migration range start and size to granularity size */
+	size = 1UL << prange->granularity;
+	start = max(ALIGN_DOWN(addr, size), prange->start);
+	last = min(ALIGN(addr + 1, size) - 1, prange->last);

-	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
-				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
-				    vmf->page);
+	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
 	if (r)
 		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
-			 r, prange->svms, prange, prange->start, prange->last);
-
-	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-	if (p->xnack_enabled && parent == prange)
-		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
-	else
-		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
-	svm_range_add_list_work(&p->svms, parent, mm, op);
-	schedule_deferred_list_work(&p->svms);
+			 r, prange->svms, prange, start, last);

 out_unlock_prange:
-	if (prange != parent)
-		mutex_unlock(&prange->migrate_mutex);
-	mutex_unlock(&parent->migrate_mutex);
+	mutex_unlock(&prange->migrate_mutex);
 out_unlock_svms:
 	mutex_unlock(&p->svms.lock);
 out_unref_process:
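With svm_range_split_by_granularity() gone, the CPU fault handler no longer splits the prange; it simply clamps the migration window to one granularity-aligned block around the faulting address. A standalone sketch of that arithmetic with made-up values (pure illustration, not driver code):

/* Illustration of the clamping above, with the kernel's ALIGN macros
 * redefined for userspace. Assumed values: a fault at page 0x12345 in a
 * prange [0x12000, 0x12fff] with granularity 9 (512-page blocks, i.e.
 * 2 MiB for 4 KiB pages).
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)        ((a) > (b) ? (a) : (b))
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long addr = 0x12345, pr_start = 0x12000, pr_last = 0x12fff;
	unsigned long size = 1UL << 9;	/* 0x200 pages */
	unsigned long start = MAX(ALIGN_DOWN(addr, size), pr_start);
	unsigned long last  = MIN(ALIGN(addr + 1, size) - 1, pr_last);

	/* prints [0x12200 0x123ff]: one aligned 512-page block */
	printf("[0x%lx 0x%lx]\n", start, last);
	return 0;
}

The design intent carried over from the removed splitting code still holds: migrating one aligned block lets a single large PTE cover the range, reducing PTE updates and L1 TLB pressure, but now without mutating the range tree in the fault path.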
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h (+4)
···
 };

 int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+			unsigned long start, unsigned long last,
 			struct mm_struct *mm, uint32_t trigger);
+
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+			    unsigned long start, unsigned long last,
 			    uint32_t trigger, struct page *fault_page);
+
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);

drivers/gpu/drm/amd/amdkfd/kfd_svm.c (+86 -90)
···
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		      unsigned long offset, unsigned long npages,
-		      unsigned long *hmm_pfns, uint32_t gpuidx)
+		      unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
 {
 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
 	dma_addr_t *addr = prange->dma_addr[gpuidx];
 	struct device *dev = adev->dev;
 	struct page *page;
+	uint64_t vram_pages_dev;
 	int i, r;

 	if (!addr) {
···
 		prange->dma_addr[gpuidx] = addr;
 	}

+	vram_pages_dev = 0;
 	addr += offset;
 	for (i = 0; i < npages; i++) {
 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
···
 		if (is_zone_device_page(page)) {
 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;

+			vram_pages_dev++;
 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
 				  bo_adev->vm_manager.vram_base_offset -
 				  bo_adev->kfd.pgmap.range.start;
···
 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
 	}
+	*vram_pages = vram_pages_dev;
 	return 0;
 }

 static int
 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 		  unsigned long offset, unsigned long npages,
-		  unsigned long *hmm_pfns)
+		  unsigned long *hmm_pfns, uint64_t *vram_pages)
 {
 	struct kfd_process *p;
 	uint32_t gpuidx;
···
 		}

 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
-					  hmm_pfns, gpuidx);
+					  hmm_pfns, gpuidx, vram_pages);
 		if (r)
 			break;
 	}
···
 	INIT_LIST_HEAD(&prange->child_list);
 	atomic_set(&prange->invalid, 0);
 	prange->validate_timestamp = 0;
+	prange->vram_pages = 0;
 	mutex_init(&prange->migrate_mutex);
 	mutex_init(&prange->lock);

···
 		 prange->start, prange->last);
 	mutex_lock(&prange->lock);
 	prange->svm_bo = NULL;
+	/* prange should not hold vram pages at this point */
+	WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
 	mutex_unlock(&prange->lock);

 	spin_lock(&svm_bo->list_lock);
···
 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
 	new->ttm_res = old->ttm_res;

+	/* set new's vram_pages from the old range for now; the accurate
+	 * vram_pages will be updated during mapping
+	 */
+	new->vram_pages = min(old->vram_pages, new->npages);
+
 	spin_lock(&new->svm_bo->list_lock);
 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
 	spin_unlock(&new->svm_bo->list_lock);
···
 		list_add_tail(&pchild->child_list, &prange->child_list);
 	}

-/**
- * svm_range_split_by_granularity - collect ranges within granularity boundary
- *
- * @p: the process with svms list
- * @mm: mm structure
- * @addr: the vm fault address in pages, to split the prange
- * @parent: parent range if prange is from child list
- * @prange: prange to split
- *
- * Trims @prange to be a single aligned block of prange->granularity if
- * possible. The head and tail are added to the child_list in @parent.
- *
- * Context: caller must hold mmap_read_lock and prange->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-int
-svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-			       unsigned long addr, struct svm_range *parent,
-			       struct svm_range *prange)
-{
-	struct svm_range *head, *tail;
-	unsigned long start, last, size;
-	int r;
-
-	/* Align split range start and size to granularity size, then a single
-	 * PTE will be used for whole range, this reduces the number of PTE
-	 * updated and the L1 TLB space used for translation.
-	 */
-	size = 1UL << prange->granularity;
-	start = ALIGN_DOWN(addr, size);
-	last = ALIGN(addr + 1, size) - 1;
-
-	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
-		 prange->svms, prange->start, prange->last, start, last, size);
-
-	if (start > prange->start) {
-		r = svm_range_split(prange, start, prange->last, &head);
-		if (r)
-			return r;
-		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
-	}
-
-	if (last < prange->last) {
-		r = svm_range_split(prange, prange->start, last, &tail);
-		if (r)
-			return r;
-		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
-	}
-
-	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
-		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
-		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
-			 prange, prange->start, prange->last,
-			 SVM_OP_ADD_RANGE_AND_MAP);
-	}
-	return 0;
-}
 static bool
 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
 {
···
  * 5. Release page table (and SVM BO) reservation
  */
 static int svm_range_validate_and_map(struct mm_struct *mm,
+				      unsigned long map_start, unsigned long map_last,
 				      struct svm_range *prange, int32_t gpuidx,
 				      bool intr, bool wait, bool flush_tlb)
 {
 	struct svm_validate_context *ctx;
 	unsigned long start, end, addr;
 	struct kfd_process *p;
+	uint64_t vram_pages;
 	void *owner;
 	int32_t idx;
 	int r = 0;
···
 		}
 	}

+	vram_pages = 0;
 	start = prange->start << PAGE_SHIFT;
 	end = (prange->last + 1) << PAGE_SHIFT;
 	for (addr = start; !r && addr < end; ) {
 		struct hmm_range *hmm_range;
+		unsigned long map_start_vma;
+		unsigned long map_last_vma;
 		struct vm_area_struct *vma;
+		uint64_t vram_pages_vma;
 		unsigned long next = 0;
 		unsigned long offset;
 		unsigned long npages;
···
 		if (!r) {
 			offset = (addr - start) >> PAGE_SHIFT;
 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-					      hmm_range->hmm_pfns);
+					      hmm_range->hmm_pfns, &vram_pages_vma);
 			if (r)
 				pr_debug("failed %d to dma map range\n", r);
+			else
+				vram_pages += vram_pages_vma;
 		}

 		svm_range_lock(prange);
···
 			r = -EAGAIN;
 		}

-		if (!r)
-			r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-						  ctx->bitmap, wait, flush_tlb);
+		if (!r) {
+			map_start_vma = max(map_start, prange->start + offset);
+			map_last_vma = min(map_last, prange->start + offset + npages - 1);
+			if (map_start_vma <= map_last_vma) {
+				offset = map_start_vma - prange->start;
+				npages = map_last_vma - map_start_vma + 1;
+				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+							  ctx->bitmap, wait, flush_tlb);
+			}
+		}

 		if (!r && next == end)
 			prange->mapped_to_gpu = true;
···
 		svm_range_unlock(prange);

 		addr = next;
+	}
+
+	if (addr == end) {
+		prange->vram_pages = vram_pages;
+
+		/* if prange does not include any vram page and has not yet
+		 * released its svm_bo, drop the svm_bo reference and set
+		 * actual_loc to sys ram
+		 */
+		if (!vram_pages && prange->ttm_res) {
+			prange->actual_loc = 0;
+			svm_range_vram_node_free(prange);
+		}
 	}

 	svm_range_unreserve_bos(ctx);
···
 	 */
 	mutex_lock(&prange->migrate_mutex);

-	r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-				       false, true, false);
+	r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+				       MAX_GPU_INSTANCE, false, true, false);
 	if (r)
 		pr_debug("failed %d to map 0x%lx to gpus\n", r,
 			 prange->start);
···
 	new->actual_loc = old->actual_loc;
 	new->granularity = old->granularity;
 	new->mapped_to_gpu = old->mapped_to_gpu;
+	new->vram_pages = old->vram_pages;
 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

···
 			uint32_t vmid, uint32_t node_id,
 			uint64_t addr, bool write_fault)
 {
+	unsigned long start, last, size;
 	struct mm_struct *mm = NULL;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
···
 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
 				       write_fault, timestamp);

-	if (prange->actual_loc != best_loc) {
+	/* Align migration range start and size to granularity size */
+	size = 1UL << prange->granularity;
+	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+	if (prange->actual_loc != 0 || best_loc != 0) {
 		migration = true;
+
 		if (best_loc) {
-			r = svm_migrate_to_vram(prange, best_loc, mm,
-						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+			r = svm_migrate_to_vram(prange, best_loc, start, last,
+						mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
 			if (r) {
 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
 					 r, addr);
 				/* Fallback to system memory if migration to
 				 * VRAM failed
 				 */
-				if (prange->actual_loc)
-					r = svm_migrate_vram_to_ram(prange, mm,
-								    KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-								    NULL);
+				if (prange->actual_loc && prange->actual_loc != best_loc)
+					r = svm_migrate_vram_to_ram(prange, mm, start, last,
+								    KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
 				else
 					r = 0;
 			}
 		} else {
-			r = svm_migrate_vram_to_ram(prange, mm,
-						    KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-						    NULL);
+			r = svm_migrate_vram_to_ram(prange, mm, start, last,
+						    KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
 		}
 		if (r) {
 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
-				 r, svms, prange->start, prange->last);
+				 r, svms, start, last);
 			goto out_unlock_range;
 		}
 	}

-	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+				       false, false);
 	if (r)
 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
-			 r, svms, prange->start, prange->last);
+			 r, svms, start, last);

 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
 				     migration);
···
 	*migrated = false;
 	best_loc = svm_range_best_prefetch_location(prange);

-	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
-	    best_loc == prange->actual_loc)
+	/* when best_loc is a gpu node and the same as prange->actual_loc,
+	 * we still need to do the migration, as prange->actual_loc != 0
+	 * does not mean all pages in prange are vram. hmm migrate will
+	 * pick up the right pages during migration.
+	 */
+	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+	    (best_loc == 0 && prange->actual_loc == 0))
 		return 0;

 	if (!best_loc) {
-		r = svm_migrate_vram_to_ram(prange, mm,
+		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
 					    KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
 		*migrated = !r;
 		return r;
 	}

-	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
 	*migrated = !r;

 	return r;
···

 	mutex_lock(&prange->migrate_mutex);
 	do {
+		/* migrate all vram pages in this prange to sys ram;
+		 * after that prange->actual_loc should be zero
+		 */
 		r = svm_migrate_vram_to_ram(prange, mm,
+					    prange->start, prange->last,
 					    KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
 	} while (!r && prange->actual_loc && --retries);

···

 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;

-		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       true, true, flush_tlb);
+		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+					       MAX_GPU_INSTANCE, true, true, flush_tlb);
 		if (r)
 			pr_debug("failed %d to map svm range\n", r);

···
 		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
 			 prange, prange->start, prange->last);
 		mutex_lock(&prange->migrate_mutex);
-		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-					       true, true, prange->mapped_to_gpu);
+		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
 		if (r)
 			pr_debug("failed %d on remap svm range\n", r);
 		mutex_unlock(&prange->migrate_mutex);
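The recurring pattern in this file: prange->actual_loc now only means the range *may* hold VRAM pages on that GPU, while prange->vram_pages carries the actual count. Both svm_migrate_vram_to_ram() and svm_range_validate_and_map() apply the same release rule, which could be condensed into a hypothetical helper like this (not in the patch, shown only to summarize the invariant):

/* Hypothetical helper (not in the diff): once a range holds no VRAM
 * pages, it is logically in system RAM, so actual_loc resets to 0 and
 * the svm_bo reference taken by svm_range_vram_node_new() is dropped.
 */
static void svm_range_residency_update(struct svm_range *prange)
{
	if (prange->vram_pages == 0 && prange->ttm_res) {
		prange->actual_loc = 0;
		svm_range_vram_node_free(prange);
	}
}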
drivers/gpu/drm/amd/amdkfd/kfd_svm.h (+5 -4)
···
  * @update_list:link list node used to add to update_list
  * @mapping:    bo_va mapping structure to create and update GPU page table
  * @npages:     number of pages
+ * @vram_pages: number of vram pages in this svm_range
  * @dma_addr:   dma mapping address on each GPU for system memory physical page
  * @ttm_res:    vram ttm resource map
  * @offset:     range start offset within mm_nodes
···
  * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
  * @perferred_loc: perferred location, 0 for CPU, or GPU id
  * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id
- * @actual_loc: the actual location, 0 for CPU, or GPU id
+ * @actual_loc: this svm_range's location. 0: all pages are in sys ram;
+ *              GPU id: this svm_range may include vram pages from the
+ *              GPU with id actual_loc.
  * @granularity:migration granularity, log2 num pages
  * @invalid:    not 0 means cpu page table is invalidated
  * @validate_timestamp: system timestamp when range is validated
···
 	struct list_head		list;
 	struct list_head		update_list;
 	uint64_t			npages;
+	uint64_t			vram_pages;
 	dma_addr_t			*dma_addr[MAX_GPU_INSTANCE];
 	struct ttm_resource		*ttm_res;
 	uint64_t			offset;
···
 int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 			    bool clear);
 void svm_range_vram_node_free(struct svm_range *prange);
-int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-				   unsigned long addr, struct svm_range *parent,
-				   struct svm_range *prange);
 int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 			    uint32_t vmid, uint32_t node_id, uint64_t addr,
 			    bool write_fault);
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile (+9 -5)
···


+ifneq ($(CONFIG_DRM_AMD_DC),)
 AMDGPUDM = \
 	amdgpu_dm.o \
 	amdgpu_dm_plane.o \
 	amdgpu_dm_crtc.o \
 	amdgpu_dm_irq.o \
 	amdgpu_dm_mst_types.o \
-	amdgpu_dm_color.o
+	amdgpu_dm_color.o \
+	amdgpu_dm_services.o \
+	amdgpu_dm_helpers.o \
+	amdgpu_dm_pp_smu.o \
+	amdgpu_dm_psr.o \
+	amdgpu_dm_replay.o \
+	amdgpu_dm_wb.o

 ifdef CONFIG_DRM_AMD_DC_FP
 AMDGPUDM += dc_fpu.o
-endif
-
-ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
 endif

 AMDGPUDM += amdgpu_dm_hdcp.o
···
 AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))

 AMD_DISPLAY_FILES += $(AMDGPU_DM)
+endif
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (+325 -34)
···
 #include "amdgpu_dm_crtc.h"
 #include "amdgpu_dm_hdcp.h"
 #include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_atombios.h"
···
 {
 	struct common_irq_params *irq_params = interrupt_params;
 	struct amdgpu_device *adev = irq_params->adev;
+	struct drm_writeback_job *job;
 	struct amdgpu_crtc *acrtc;
 	unsigned long flags;
 	int vrr_active;
···
 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
 	if (!acrtc)
 		return;
+
+	if (acrtc->wb_pending) {
+		if (acrtc->wb_conn) {
+			spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+						       struct drm_writeback_job,
+						       list_entry);
+			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+			if (job) {
+				unsigned int v_total, refresh_hz;
+				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
+				v_total = stream->adjust.v_total_max ?
+					  stream->adjust.v_total_max : stream->timing.v_total;
+				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+						     100LL, (v_total * stream->timing.h_total));
+				mdelay(1000 / refresh_hz);
+
+				drm_writeback_signal_completion(acrtc->wb_conn, 0);
+				dc_stream_fc_disable_writeback(adev->dm.dc,
+							       acrtc->dm_irq_params.stream, 0);
+			}
+		} else
+			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+		acrtc->wb_pending = false;
+	}

 	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

···

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (link && aconnector->dc_link == link) {
 			if (notify->type == DMUB_NOTIFICATION_HPD)
···

 	drm_connector_list_iter_begin(dev, &conn_iter);
 	drm_for_each_connector_iter(connector, &conn_iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->audio_inst != port)
 			continue;
···
 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
 	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];

+	/* Enable DWB for tested platforms only */
+	if (adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0))
+		init_data.num_virtual_links = 1;
+
 	INIT_LIST_HEAD(&adev->dm.da_list);

 	retrieve_dmi_info(&adev->dm);
···

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
 		    aconnector->mst_mgr.aux) {
···

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);
 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
 		    aconnector->mst_root)
···
 	return 0;
 }

-struct amdgpu_dm_connector *
+struct drm_connector *
 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 					     struct drm_crtc *crtc)
 {
···
 		crtc_from_state = new_con_state->crtc;

 		if (crtc_from_state == crtc)
-			return to_amdgpu_dm_connector(connector);
+			return connector;
 	}

 	return NULL;
···
 	/* Do detection*/
 	drm_connector_list_iter_begin(ddev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);

 		if (!aconnector->dc_link)
···

 	list_for_each_entry(connector,
 			    &dev->mode_config.connector_list, head) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;

 		aconnector = to_amdgpu_dm_connector(connector);
 		dc_link = aconnector->dc_link;
···
 			continue;
 		}

+		link = dc_get_link_at_index(dm->dc, i);
+
+		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+			struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+			if (!wbcon) {
+				DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+				continue;
+			}
+
+			if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+				DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+				kfree(wbcon);
+				continue;
+			}
+
+			link->psr_settings.psr_feature_enabled = false;
+			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+			continue;
+		}
+
 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
 		if (!aconnector)
 			goto fail;
···
 			DRM_ERROR("KMS: Failed to initialize connector\n");
 			goto fail;
 		}
-
-		link = dc_get_link_at_index(dm->dc, i);

 		if (!dc_link_detect_connection_type(link, &new_connection_type))
 			DRM_ERROR("KMS: Failed to detect connector\n");
···
 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 		return;

+	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+		goto ffu;
+
 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
 	clips = drm_plane_get_damage_clips(new_plane_state);
···
 {
 	struct dc_crtc_timing *timing_out = &stream->timing;
 	const struct drm_display_info *info = &connector->display_info;
-	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct amdgpu_dm_connector *aconnector = NULL;
 	struct hdmi_vendor_infoframe hv_frame;
 	struct hdmi_avi_infoframe avi_frame;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+		aconnector = to_amdgpu_dm_connector(connector);

 	memset(&hv_frame, 0, sizeof(hv_frame));
 	memset(&avi_frame, 0, sizeof(avi_frame));
···
 	    && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 	else if (drm_mode_is_420_also(info, mode_in)
+		 && aconnector
 		 && aconnector->force_yuv420_output)
 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
···
 		timing_out->hdmi_vic = hv_frame.vic;
 	}

-	if (is_freesync_video_mode(mode_in, aconnector)) {
+	if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
 		timing_out->h_addressable = mode_in->hdisplay;
 		timing_out->h_total = mode_in->htotal;
 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
···
 }

 static struct dc_sink *
-create_fake_sink(struct amdgpu_dm_connector *aconnector)
+create_fake_sink(struct dc_link *link)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
 	struct dc_sink *sink = NULL;

-	sink_init_data.link = aconnector->dc_link;
-	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+	sink_init_data.link = link;
+	sink_init_data.sink_signal = link->connector_signal;

 	sink = dc_sink_create(&sink_init_data);
 	if (!sink) {
···
 }

 static struct dc_stream_state *
-create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_stream_for_sink(struct drm_connector *connector,
 		       const struct drm_display_mode *drm_mode,
 		       const struct dm_connector_state *dm_state,
 		       const struct dc_stream_state *old_stream,
 		       int requested_bpc)
 {
+	struct amdgpu_dm_connector *aconnector = NULL;
 	struct drm_display_mode *preferred_mode = NULL;
-	struct drm_connector *drm_connector;
 	const struct drm_connector_state *con_state = &dm_state->base;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode;
···
 	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
 	struct dsc_dec_dpcd_caps dsc_caps;

+	struct dc_link *link = NULL;
 	struct dc_sink *sink = NULL;

 	drm_mode_init(&mode, drm_mode);
 	memset(&saved_mode, 0, sizeof(saved_mode));

-	if (aconnector == NULL) {
-		DRM_ERROR("aconnector is NULL!\n");
+	if (connector == NULL) {
+		DRM_ERROR("connector is NULL!\n");
 		return stream;
 	}

-	drm_connector = &aconnector->base;
+	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+		aconnector = NULL;
+		aconnector = to_amdgpu_dm_connector(connector);
+		link = aconnector->dc_link;
+	} else {
+		struct drm_writeback_connector *wbcon = NULL;
+		struct amdgpu_dm_wb_connector *dm_wbcon = NULL;

-	if (!aconnector->dc_sink) {
-		sink = create_fake_sink(aconnector);
+		wbcon = drm_connector_to_writeback(connector);
+		dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+		link = dm_wbcon->link;
+	}
+
+	if (!aconnector || !aconnector->dc_sink) {
+		sink = create_fake_sink(link);
 		if (!sink)
 			return stream;
+
 	} else {
 		sink = aconnector->dc_sink;
 		dc_sink_retain(sink);
···
 		goto finish;
 	}

+	/* We leave this NULL for writeback connectors */
 	stream->dm_stream_context = aconnector;

 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
-		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+		connector->display_info.hdmi.scdc.scrambling.low_rates;

-	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+	list_for_each_entry(preferred_mode, &connector->modes, head) {
 		/* Search for preferred mode */
 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
 			native_mode_found = true;
···
 	}
 	if (!native_mode_found)
 		preferred_mode = list_first_entry_or_null(
-				&aconnector->base.modes,
+				&connector->modes,
 				struct drm_display_mode,
 				head);
···
 		 * and the modelist may not be filled in time.
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
-	} else {
+	} else if (aconnector) {
 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
···
 	 */
 	if (!scale || mode_refresh != preferred_refresh)
 		fill_stream_properties_from_drm_display_mode(
-			stream, &mode, &aconnector->base, con_state, NULL,
+			stream, &mode, connector, con_state, NULL,
 			requested_bpc);
 	else
 		fill_stream_properties_from_drm_display_mode(
-			stream, &mode, &aconnector->base, con_state, old_stream,
+			stream, &mode, connector, con_state, old_stream,
 			requested_bpc);
+
+	/* The rest isn't needed for writeback connectors */
+	if (!aconnector)
+		goto finish;

 	if (aconnector->timing_changed) {
 		drm_dbg(aconnector->base.dev,
···

 	fill_audio_info(
 		&stream->audio_info,
-		drm_connector,
+		connector,
 		sink);

 	update_stream_signal(stream, sink);
···
 	enum dc_status dc_result = DC_OK;

 	do {
-		stream = create_stream_for_sink(aconnector, drm_mode,
+		stream = create_stream_for_sink(connector, drm_mode,
 						dm_state, old_stream,
 						requested_bpc);
 		if (stream == NULL) {
 			DRM_ERROR("Failed to create stream for sink!\n");
 			break;
 		}
+
+		if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			return stream;

 		dc_result = dc_validate_stream(adev->dm.dc, stream);
 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
···
 	int vcpi, pbn_div, pbn, slot_num = 0;

 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;

 		aconnector = to_amdgpu_dm_connector(connector);

···
 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
 	struct amdgpu_i2c_adapter *i2c;

+	/* Not needed for writeback connector */
 	link->priv = aconnector;

···
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
 			continue;

+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 notify:
 		aconnector = to_amdgpu_dm_connector(connector);

···
 		if (!status)
 			continue;

+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
 		aconnector = to_amdgpu_dm_connector(connector);

 		mutex_lock(&adev->dm.audio_lock);
···
 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 }

+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+			       struct dm_crtc_state *crtc_state)
+{
+	dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 				     struct dc_state *dc_state)
 {
···
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+	struct drm_connector_state *old_con_state;
+	struct drm_connector *connector;
 	bool mode_set_reset_required = false;
 	u32 i;
+
+	/* Disable writeback */
+	for_each_old_connector_in_state(state, connector, old_con_state, i) {
+		struct dm_connector_state *dm_old_con_state;
+		struct amdgpu_crtc *acrtc;
+
+		if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
+		old_crtc_state = NULL;
+
+		dm_old_con_state = to_dm_connector_state(old_con_state);
+		if (!dm_old_con_state->base.crtc)
+			continue;
+
+		acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+		if (acrtc)
+			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+		if (!acrtc->wb_enabled)
+			continue;
+
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		dm_clear_writeback(dm, dm_old_crtc_state);
+		acrtc->wb_enabled = false;
+	}

 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
 				      new_crtc_state, i) {
···
 	}
 }

+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+			     struct dm_crtc_state *crtc_state,
+			     struct drm_connector *connector,
+			     struct drm_connector_state *new_con_state)
+{
+	struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+	struct amdgpu_device *adev = dm->adev;
+	struct amdgpu_crtc *acrtc;
+	struct dc_writeback_info *wb_info;
+	struct pipe_ctx *pipe = NULL;
+	struct amdgpu_framebuffer *afb;
+	int i = 0;
+
+	wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+	if (!wb_info) {
+		DRM_ERROR("Failed to allocate wb_info\n");
+		return;
+	}
+
+	acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+	if (!acrtc) {
+		DRM_ERROR("no amdgpu_crtc found\n");
+		return;
+	}
+
+	afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+	if (!afb) {
+		DRM_ERROR("No amdgpu_framebuffer found\n");
+		return;
+	}
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+			pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+			break;
+		}
+	}
+
+	/* fill in wb_info */
+	wb_info->wb_enabled = true;
+
+	wb_info->dwb_pipe_inst = 0;
+	wb_info->dwb_params.dwbscl_black_color = 0;
+	wb_info->dwb_params.hdr_mult = 0x1F000;
+	wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+	wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+	wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+	wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+	/* width & height from crtc */
+	wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+	wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+	wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+	wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+	wb_info->dwb_params.cnv_params.crop_en = false;
+	wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+	wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff;	// 10 bits
+	wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+	wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+	wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+	wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+	wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+	wb_info->dwb_params.scaler_taps.h_taps = 4;
+	wb_info->dwb_params.scaler_taps.v_taps = 4;
+	wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+	wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+	wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+	wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+	wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+	for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+		wb_info->mcif_buf_params.luma_address[i] = afb->address;
+		wb_info->mcif_buf_params.chroma_address[i] = 0;
+	}
+
+	wb_info->mcif_buf_params.p_vmid = 1;
+	if (adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0)) {
+		wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+		wb_info->mcif_warmup_params.region_size =
+			wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+	}
+	wb_info->mcif_warmup_params.p_vmid = 1;
+	wb_info->writeback_source_plane = pipe->plane_state;
+
+	dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+	acrtc->wb_pending = true;
+	acrtc->wb_conn = wb_conn;
+	drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
 /**
  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8857 8805 * @state: The atomic state to commit ··· 8999 8753 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9000 8754 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9001 8755 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9002 - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8756 + struct amdgpu_dm_connector *aconnector; 8757 + 8758 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 8759 + continue; 8760 + 8761 + aconnector = to_amdgpu_dm_connector(connector); 9003 8762 9004 8763 if (!adev->dm.hdcp_workqueue) 9005 8764 continue; ··· 9281 9030 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 9282 9031 } 9283 9032 9033 + /* Enable writeback */ 9034 + for_each_new_connector_in_state(state, connector, new_con_state, i) { 9035 + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9036 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9037 + 9038 + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 9039 + continue; 9040 + 9041 + if (!new_con_state->writeback_job) 9042 + continue; 9043 + 9044 + new_crtc_state = NULL; 9045 + 9046 + if (acrtc) 9047 + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9048 + 9049 + if (acrtc->wb_enabled) 9050 + continue; 9051 + 9052 + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9053 + 9054 + dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 9055 + acrtc->wb_enabled = true; 9056 + } 9057 + 9284 9058 /* Update audio instances for each connector. */ 9285 9059 amdgpu_dm_commit_audio(dev, state); 9286 9060 ··· 9423 9147 void dm_restore_drm_connector_state(struct drm_device *dev, 9424 9148 struct drm_connector *connector) 9425 9149 { 9426 - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9150 + struct amdgpu_dm_connector *aconnector; 9427 9151 struct amdgpu_crtc *disconnected_acrtc; 9428 9152 struct dm_crtc_state *acrtc_state; 9153 + 9154 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9155 + return; 9156 + 9157 + aconnector = to_amdgpu_dm_connector(connector); 9429 9158 9430 9159 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 9431 9160 return; ··· 9508 9227 struct dm_connector_state *new_con_state) 9509 9228 { 9510 9229 struct mod_freesync_config config = {0}; 9511 - struct amdgpu_dm_connector *aconnector = 9512 - to_amdgpu_dm_connector(new_con_state->base.connector); 9230 + struct amdgpu_dm_connector *aconnector; 9513 9231 struct drm_display_mode *mode = &new_crtc_state->base.mode; 9514 9232 int vrefresh = drm_mode_vrefresh(mode); 9515 9233 bool fs_vid_mode = false; 9234 + 9235 + if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9236 + return; 9237 + 9238 + aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 9516 9239 9517 9240 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 9518 9241 vrefresh >= aconnector->min_vfreq && ··· 9617 9332 * update changed items 9618 9333 */ 9619 9334 struct amdgpu_crtc *acrtc = NULL; 9335 + struct drm_connector *connector = NULL; 9620 9336 struct amdgpu_dm_connector *aconnector = NULL; 9621 9337 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 9622 9338 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; ··· 9627 9341 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9628 
9342 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9629 9343 acrtc = to_amdgpu_crtc(crtc); 9630 - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 9344 + connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 9345 + if (connector) 9346 + aconnector = to_amdgpu_dm_connector(connector); 9631 9347 9632 9348 /* TODO This hack should go away */ 9633 - if (aconnector && enable) { 9349 + if (connector && enable) { 9634 9350 /* Make sure fake sink is created in plug-in scenario */ 9635 9351 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 9636 - &aconnector->base); 9352 + connector); 9637 9353 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 9638 - &aconnector->base); 9354 + connector); 9639 9355 9640 9356 if (IS_ERR(drm_new_conn_state)) { 9641 9357 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); ··· 9784 9496 * added MST connectors not found in existing crtc_state in the chained mode 9785 9497 * TODO: need to dig out the root cause of that 9786 9498 */ 9787 - if (!aconnector) 9499 + if (!connector) 9788 9500 goto skip_modeset; 9789 9501 9790 9502 if (modereset_required(new_crtc_state)) ··· 9827 9539 * We want to do dc stream updates that do not require a 9828 9540 * full modeset below. 9829 9541 */ 9830 - if (!(enable && aconnector && new_crtc_state->active)) 9542 + if (!(enable && connector && new_crtc_state->active)) 9831 9543 return 0; 9832 9544 /* 9833 9545 * Given above conditions, the dc state cannot be NULL because: ··· 10348 10060 conn_state = old_conn_state; 10349 10061 10350 10062 if (conn_state->crtc != crtc) 10063 + continue; 10064 + 10065 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10351 10066 continue; 10352 10067 10353 10068 aconnector = to_amdgpu_dm_connector(connector);
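A minimal sketch of the invariant the amdgpu_dm.c hunks above keep enforcing: a writeback drm_connector is not embedded in a struct amdgpu_dm_connector, so the container_of()-based downcast is only legal for non-writeback connectors. The helper below is hypothetical and only names the pattern; it is not part of the commit.

	/* Hypothetical helper -- not in the patch; it just names the pattern. */
	static struct amdgpu_dm_connector *
	dm_connector_or_null(struct drm_connector *connector)
	{
		/* writeback connectors carry no dc_sink/aconnector state */
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			return NULL;

		return to_amdgpu_dm_connector(connector);
	}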
+9 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 32 32 #include <drm/drm_crtc.h> 33 33 #include <drm/drm_plane.h> 34 34 #include "link_service_types.h" 35 + #include <drm/drm_writeback.h> 35 36 36 37 /* 37 38 * This file contains the definition for amdgpu_display_manager ··· 715 714 716 715 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) 717 716 717 + struct amdgpu_dm_wb_connector { 718 + struct drm_writeback_connector base; 719 + struct dc_link *link; 720 + }; 721 + 722 + #define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base) 723 + 718 724 extern const struct amdgpu_ip_block_version dm_ip_block; 719 725 720 726 struct dm_plane_state { ··· 842 834 int dm_atomic_get_state(struct drm_atomic_state *state, 843 835 struct dm_atomic_state **dm_state); 844 836 845 - struct amdgpu_dm_connector * 837 + struct drm_connector * 846 838 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 847 839 struct drm_crtc *crtc); 848 840
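A rough companion sketch for the new wrapper type: resolving the dc_link for either connector flavor, mirroring the create_stream_for_sink() hunk above. The helper name is hypothetical.

	static struct dc_link *dm_connector_to_dc_link(struct drm_connector *connector)
	{
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) {
			struct drm_writeback_connector *wbcon =
				drm_connector_to_writeback(connector);

			return to_amdgpu_dm_wb_connector(wbcon)->link;
		}

		return to_amdgpu_dm_connector(connector)->dc_link;
	}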
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 326 326 if (!connector->state || connector->state->crtc != crtc) 327 327 continue; 328 328 329 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 330 + continue; 331 + 329 332 aconn = to_amdgpu_dm_connector(connector); 330 333 break; 331 334 }
+16 -6
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 894 894 895 895 drm_connector_list_iter_begin(dev, &iter); 896 896 drm_for_each_connector_iter(connector, &iter) { 897 - struct amdgpu_dm_connector *amdgpu_dm_connector = 898 - to_amdgpu_dm_connector(connector); 897 + struct amdgpu_dm_connector *amdgpu_dm_connector; 898 + const struct dc_link *dc_link; 899 899 900 - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; 900 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 901 + continue; 902 + 903 + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 904 + 905 + dc_link = amdgpu_dm_connector->dc_link; 901 906 902 907 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 903 908 dc_interrupt_set(adev->dm.dc, ··· 935 930 936 931 drm_connector_list_iter_begin(dev, &iter); 937 932 drm_for_each_connector_iter(connector, &iter) { 938 - struct amdgpu_dm_connector *amdgpu_dm_connector = 939 - to_amdgpu_dm_connector(connector); 940 - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; 933 + struct amdgpu_dm_connector *amdgpu_dm_connector; 934 + const struct dc_link *dc_link; 935 + 936 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 937 + continue; 938 + 939 + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 940 + dc_link = amdgpu_dm_connector->dc_link; 941 941 942 942 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 943 943 dc_interrupt_set(adev->dm.dc,
+3 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 1500 1500 int ind = find_crtc_index_in_state_by_stream(state, stream); 1501 1501 1502 1502 if (ind >= 0) { 1503 + struct drm_connector *connector; 1503 1504 struct amdgpu_dm_connector *aconnector; 1504 1505 struct drm_connector_state *drm_new_conn_state; 1505 1506 struct dm_connector_state *dm_new_conn_state; 1506 1507 struct dm_crtc_state *dm_old_crtc_state; 1507 1508 1508 - aconnector = 1509 + connector = 1509 1510 amdgpu_dm_find_first_crtc_matching_connector(state, 1510 1511 state->crtcs[ind].ptr); 1512 + aconnector = to_amdgpu_dm_connector(connector); 1511 1513 drm_new_conn_state = 1512 1514 drm_atomic_get_new_connector_state(state, 1513 1515 &aconnector->base);
+215
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2022 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #include "dm_services_types.h" 28 + 29 + #include "amdgpu.h" 30 + #include "amdgpu_dm.h" 31 + #include "amdgpu_dm_wb.h" 32 + #include "amdgpu_display.h" 33 + #include "dc.h" 34 + 35 + #include <drm/drm_atomic_state_helper.h> 36 + #include <drm/drm_modeset_helper_vtables.h> 37 + 38 + static const u32 amdgpu_dm_wb_formats[] = { 39 + DRM_FORMAT_XRGB2101010, 40 + }; 41 + 42 + static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, 43 + struct drm_crtc_state *crtc_state, 44 + struct drm_connector_state *conn_state) 45 + { 46 + struct drm_framebuffer *fb; 47 + const struct drm_display_mode *mode = &crtc_state->mode; 48 + bool found = false; 49 + uint8_t i; 50 + 51 + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 52 + return 0; 53 + 54 + fb = conn_state->writeback_job->fb; 55 + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { 56 + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", 57 + fb->width, fb->height); 58 + return -EINVAL; 59 + } 60 + 61 + for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) { 62 + if (fb->format->format == amdgpu_dm_wb_formats[i]) 63 + found = true; 64 + } 65 + 66 + if (!found) { 67 + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", 68 + &fb->format->format); 69 + return -EINVAL; 70 + } 71 + 72 + return 0; 73 + } 74 + 75 + 76 + static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) 77 + { 78 + struct drm_device *dev = connector->dev; 79 + 80 + return drm_add_modes_noedid(connector, dev->mode_config.max_width, 81 + dev->mode_config.max_height); 82 + } 83 + 84 + static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, 85 + struct drm_writeback_job *job) 86 + { 87 + struct amdgpu_framebuffer *afb; 88 + struct drm_gem_object *obj; 89 + struct amdgpu_device *adev; 90 + struct amdgpu_bo *rbo; 91 + uint32_t domain; 92 + int r; 93 + 94 + if (!job->fb) { 95 + DRM_DEBUG_KMS("No FB bound\n"); 96 + return 0; 97 + } 98 + 99 + afb = to_amdgpu_framebuffer(job->fb); 100 + obj = job->fb->obj[0]; 101 + rbo = gem_to_amdgpu_bo(obj); 102 + adev = amdgpu_ttm_adev(rbo->tbo.bdev); 103 + 104 + r = amdgpu_bo_reserve(rbo, true); 105 + if (r) { 106 + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 107 + return r; 108 + 
} 109 + 110 + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 111 + if (r) { 112 + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 113 + goto error_unlock; 114 + } 115 + 116 + domain = amdgpu_display_supported_domains(adev, rbo->flags); 117 + 118 + r = amdgpu_bo_pin(rbo, domain); 119 + if (unlikely(r != 0)) { 120 + if (r != -ERESTARTSYS) 121 + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 122 + goto error_unlock; 123 + } 124 + 125 + r = amdgpu_ttm_alloc_gart(&rbo->tbo); 126 + if (unlikely(r != 0)) { 127 + DRM_ERROR("%p bind failed\n", rbo); 128 + goto error_unpin; 129 + } 130 + 131 + amdgpu_bo_unreserve(rbo); 132 + 133 + afb->address = amdgpu_bo_gpu_offset(rbo); 134 + 135 + amdgpu_bo_ref(rbo); 136 + 137 + return 0; 138 + 139 + error_unpin: 140 + amdgpu_bo_unpin(rbo); 141 + 142 + error_unlock: 143 + amdgpu_bo_unreserve(rbo); 144 + return r; 145 + } 146 + 147 + static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector, 148 + struct drm_writeback_job *job) 149 + { 150 + struct amdgpu_bo *rbo; 151 + int r; 152 + 153 + if (!job->fb) 154 + return; 155 + 156 + rbo = gem_to_amdgpu_bo(job->fb->obj[0]); 157 + r = amdgpu_bo_reserve(rbo, false); 158 + if (unlikely(r)) { 159 + DRM_ERROR("failed to reserve rbo before unpin\n"); 160 + return; 161 + } 162 + 163 + amdgpu_bo_unpin(rbo); 164 + amdgpu_bo_unreserve(rbo); 165 + amdgpu_bo_unref(&rbo); 166 + } 167 + 168 + static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = { 169 + .atomic_check = amdgpu_dm_wb_encoder_atomic_check, 170 + }; 171 + 172 + static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = { 173 + .fill_modes = drm_helper_probe_single_connector_modes, 174 + .destroy = drm_connector_cleanup, 175 + .reset = amdgpu_dm_connector_funcs_reset, 176 + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 177 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 178 + }; 179 + 180 + static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = { 181 + .get_modes = amdgpu_dm_wb_connector_get_modes, 182 + .prepare_writeback_job = amdgpu_dm_wb_prepare_job, 183 + .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job, 184 + }; 185 + 186 + int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, 187 + struct amdgpu_dm_wb_connector *wbcon, 188 + uint32_t link_index) 189 + { 190 + struct dc *dc = dm->dc; 191 + struct dc_link *link = dc_get_link_at_index(dc, link_index); 192 + int res = 0; 193 + 194 + wbcon->link = link; 195 + 196 + drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs); 197 + 198 + res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base, 199 + &amdgpu_dm_wb_connector_funcs, 200 + &amdgpu_dm_wb_encoder_helper_funcs, 201 + amdgpu_dm_wb_formats, 202 + ARRAY_SIZE(amdgpu_dm_wb_formats), 203 + amdgpu_dm_get_encoder_crtc_mask(dm->adev)); 204 + 205 + if (res) 206 + return res; 207 + /* 208 + * Some of the properties below require access to state, like bpc. 209 + * Allocate some default initial connector state with our reset helper. 210 + */ 211 + if (wbcon->base.base.funcs->reset) 212 + wbcon->base.base.funcs->reset(&wbcon->base.base); 213 + 214 + return 0; 215 + }
+36
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2022 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #ifndef __AMDGPU_DM_WB_H__ 28 + #define __AMDGPU_DM_WB_H__ 29 + 30 + #include <drm/drm_writeback.h> 31 + 32 + int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, 33 + struct amdgpu_dm_wb_connector *dm_wbcon, 34 + uint32_t link_index); 35 + 36 + #endif
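The call site for amdgpu_dm_wb_connector_init() is outside this excerpt; a plausible registration sketch, assuming the caller allocates the wrapper and owns cleanup on failure:

	struct amdgpu_dm_wb_connector *wbcon;

	wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
	if (!wbcon)
		return -ENOMEM;

	if (amdgpu_dm_wb_connector_init(dm, wbcon, link_index)) {
		kfree(wbcon);	/* assumed error policy, not from the patch */
		return -EINVAL;
	}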
+30 -32
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 1691 1691 static enum bp_result bios_parser_enable_lvtma_control( 1692 1692 struct dc_bios *dcb, 1693 1693 uint8_t uc_pwr_on, 1694 - uint8_t panel_instance, 1694 + uint8_t pwrseq_instance, 1695 1695 uint8_t bypass_panel_control_wait) 1696 1696 { 1697 1697 struct bios_parser *bp = BP_FROM_DCB(dcb); ··· 1699 1699 if (!bp->cmd_tbl.enable_lvtma_control) 1700 1700 return BP_RESULT_FAILURE; 1701 1701 1702 - return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait); 1702 + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, pwrseq_instance, bypass_panel_control_wait); 1703 1703 } 1704 1704 1705 1705 static bool bios_parser_is_accelerated_mode( ··· 1747 1747 result = get_firmware_info_v3_2(bp, info); 1748 1748 break; 1749 1749 case 4: 1750 + case 5: 1750 1751 result = get_firmware_info_v3_4(bp, info); 1751 1752 break; 1752 1753 default: ··· 2215 2214 2216 2215 switch (bp->object_info_tbl.revision.minor) { 2217 2216 case 4: 2218 - default: 2219 - object = get_bios_object(bp, object_id); 2217 + default: 2218 + object = get_bios_object(bp, object_id); 2220 2219 2221 - if (!object) 2222 - return BP_RESULT_BADINPUT; 2220 + if (!object) 2221 + return BP_RESULT_BADINPUT; 2223 2222 2224 - record = get_disp_connector_caps_record(bp, object); 2225 - if (!record) 2226 - return BP_RESULT_NORECORD; 2223 + record = get_disp_connector_caps_record(bp, object); 2224 + if (!record) 2225 + return BP_RESULT_NORECORD; 2227 2226 2228 - info->INTERNAL_DISPLAY = 2229 - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; 2230 - info->INTERNAL_DISPLAY_BL = 2231 - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0; 2232 - break; 2233 - case 5: 2227 + info->INTERNAL_DISPLAY = 2228 + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; 2229 + info->INTERNAL_DISPLAY_BL = 2230 + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 
1 : 0; 2231 + break; 2232 + case 5: 2234 2233 object_path_v3 = get_bios_object_from_path_v3(bp, object_id); 2235 2234 2236 2235 if (!object_path_v3) ··· 3330 3329 DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n"); 3331 3330 return BP_RESULT_BADINPUT; 3332 3331 } 3332 + 3333 3333 tbl = &bp->object_info_tbl; 3334 3334 v1_4 = tbl->v1_4; 3335 3335 v1_5 = tbl->v1_5; 3336 3336 3337 3337 result = BP_RESULT_NORECORD; 3338 3338 switch (bp->object_info_tbl.revision.minor) { 3339 - case 4: 3340 - default: 3341 - for (i = 0; i < v1_4->number_of_path; ++i) { 3342 - if (bracket_layout_id == 3343 - v1_4->display_path[i].display_objid) { 3344 - result = update_slot_layout_info(dcb, i, slot_layout_info); 3345 - break; 3346 - } 3339 + case 4: 3340 + default: 3341 + for (i = 0; i < v1_4->number_of_path; ++i) { 3342 + if (bracket_layout_id == v1_4->display_path[i].display_objid) { 3343 + result = update_slot_layout_info(dcb, i, slot_layout_info); 3344 + break; 3347 3345 } 3348 - break; 3349 - case 5: 3350 - for (i = 0; i < v1_5->number_of_path; ++i) 3351 - result = update_slot_layout_info_v2(dcb, i, slot_layout_info); 3352 - break; 3346 + } 3347 + break; 3348 + case 5: 3349 + for (i = 0; i < v1_5->number_of_path; ++i) 3350 + result = update_slot_layout_info_v2(dcb, i, slot_layout_info); 3351 + break; 3353 3352 } 3353 + 3354 3354 return result; 3355 3355 } 3356 3356 ··· 3360 3358 struct board_layout_info *board_layout_info) 3361 3359 { 3362 3360 unsigned int i; 3363 - 3364 3361 struct bios_parser *bp; 3365 - 3366 3362 static enum bp_result record_result; 3367 3363 unsigned int max_slots; 3368 3364 ··· 3369 3369 GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2, 3370 3370 0, 0 3371 3371 }; 3372 - 3373 3372 3374 3373 bp = BP_FROM_DCB(dcb); 3375 3374 ··· 3550 3551 .bios_parser_destroy = firmware_parser_destroy, 3551 3552 3552 3553 .get_board_layout_info = bios_get_board_layout_info, 3553 - /* TODO: use this fn in hw init?*/ 3554 3554 .pack_data_tables = bios_parser_pack_data_tables, 3555 3555 3556 3556 .get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
+6 -6
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
··· 976 976 static enum bp_result enable_lvtma_control( 977 977 struct bios_parser *bp, 978 978 uint8_t uc_pwr_on, 979 - uint8_t panel_instance, 979 + uint8_t pwrseq_instance, 980 980 uint8_t bypass_panel_control_wait); 981 981 982 982 static void init_enable_lvtma_control(struct bios_parser *bp) ··· 989 989 static void enable_lvtma_control_dmcub( 990 990 struct dc_dmub_srv *dmcub, 991 991 uint8_t uc_pwr_on, 992 - uint8_t panel_instance, 992 + uint8_t pwrseq_instance, 993 993 uint8_t bypass_panel_control_wait) 994 994 { 995 995 ··· 1002 1002 DMUB_CMD__VBIOS_LVTMA_CONTROL; 1003 1003 cmd.lvtma_control.data.uc_pwr_action = 1004 1004 uc_pwr_on; 1005 - cmd.lvtma_control.data.panel_inst = 1006 - panel_instance; 1005 + cmd.lvtma_control.data.pwrseq_inst = 1006 + pwrseq_instance; 1007 1007 cmd.lvtma_control.data.bypass_panel_control_wait = 1008 1008 bypass_panel_control_wait; 1009 1009 dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); ··· 1012 1012 static enum bp_result enable_lvtma_control( 1013 1013 struct bios_parser *bp, 1014 1014 uint8_t uc_pwr_on, 1015 - uint8_t panel_instance, 1015 + uint8_t pwrseq_instance, 1016 1016 uint8_t bypass_panel_control_wait) 1017 1017 { 1018 1018 enum bp_result result = BP_RESULT_FAILURE; ··· 1021 1021 bp->base.ctx->dc->debug.dmub_command_table) { 1022 1022 enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, 1023 1023 uc_pwr_on, 1024 - panel_instance, 1024 + pwrseq_instance, 1025 1025 bypass_panel_control_wait); 1026 1026 return BP_RESULT_OK; 1027 1027 }
+1 -1
drivers/gpu/drm/amd/display/dc/bios/command_table2.h
··· 96 96 struct bios_parser *bp, uint8_t id); 97 97 enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, 98 98 uint8_t uc_pwr_on, 99 - uint8_t panel_instance, 99 + uint8_t pwrseq_instance, 100 100 uint8_t bypass_panel_control_wait); 101 101 }; 102 102
+16 -10
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 460 460 461 461 static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr) 462 462 { 463 - unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK 464 - unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK 465 - unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK 466 - unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK 467 - unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK 468 - unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK 463 + unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, 464 + fclk_khz_reg; 465 + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; 466 + 467 + msleep(5); 468 + 469 + dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK 470 + dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK 471 + dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK 472 + dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK 473 + dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK 474 + fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK 469 475 470 476 // Overrides for these clocks in case there is no p_state change support 471 - int dramclk_khz_override = new_clocks->dramclk_khz; 472 - int fclk_khz_override = new_clocks->fclk_khz; 477 + dramclk_khz_override = new_clocks->dramclk_khz; 478 + fclk_khz_override = new_clocks->fclk_khz; 473 479 474 - int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; 480 + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; 475 481 476 482 if (!new_clocks->p_state_change_support) { 477 483 dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; ··· 713 707 dmcu->funcs->set_psr_wait_loop(dmcu, 714 708 clk_mgr_base->clks.dispclk_khz / 1000 / 7); 715 709 716 - if (dc->config.enable_auto_dpm_test_logs) { 710 + if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { 717 711 dcn32_auto_dpm_test_log(new_clocks, clk_mgr); 718 712 } 719 713 }
+9 -16
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
··· 80 80 81 81 static int dcn35_get_active_display_cnt_wa( 82 82 struct dc *dc, 83 - struct dc_state *context) 83 + struct dc_state *context, 84 + int *all_active_disps) 84 85 { 85 - int i, display_count; 86 + int i, display_count = 0; 86 87 bool tmds_present = false; 87 88 88 - display_count = 0; 89 89 for (i = 0; i < context->stream_count; i++) { 90 90 const struct dc_stream_state *stream = context->streams[i]; 91 91 ··· 103 103 link->link_enc->funcs->is_dig_enabled(link->link_enc)) 104 104 display_count++; 105 105 } 106 - 106 + if (all_active_disps != NULL) 107 + *all_active_disps = display_count; 107 108 /* WA for hang on HDMI after display off back on*/ 108 109 if (display_count == 0 && tmds_present) 109 110 display_count = 1; ··· 127 126 continue; 128 127 if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || 129 128 !pipe->stream->link_enc)) { 130 - struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; 131 - 132 129 if (disable) { 133 - if (stream_enc && stream_enc->funcs->disable_fifo) 134 - pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc); 135 - 136 130 if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) 137 131 pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); 138 132 139 133 reset_sync_context_for_pipe(dc, context, i); 140 134 } else { 141 135 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); 142 - 143 - if (stream_enc && stream_enc->funcs->enable_fifo) 144 - pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc); 145 136 } 146 137 } 147 138 } ··· 217 224 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 218 225 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 219 226 struct dc *dc = clk_mgr_base->ctx->dc; 220 - int display_count; 227 + int display_count = 0; 221 228 bool update_dppclk = false; 222 229 bool update_dispclk = false; 223 230 bool dpp_clock_lowered = false; 231 + int all_active_disps = 0; 224 232 225 233 if (dc->work_arounds.skip_clock_update) 226 234 return; 227 235 228 - /* DTBCLK is fixed, so set a default if unspecified. */ 236 + display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); 229 237 if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) 230 238 new_clocks->ref_dtbclk_khz = 600000; 231 239 ··· 248 254 } 249 255 /* check that we're not already in lower */ 250 256 if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { 251 - display_count = dcn35_get_active_display_cnt_wa(dc, context); 252 257 /* if we can go lower, go lower */ 253 258 if (display_count == 0) 254 259 clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; ··· 819 826 struct dc_state *context = dc->current_state; 820 827 821 828 if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { 822 - display_count = dcn35_get_active_display_cnt_wa(dc, context); 829 + display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL); 823 830 /* if we can go lower, go lower */ 824 831 if (display_count == 0) 825 832 clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
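dcn35_get_active_display_cnt_wa() still returns the workaround-adjusted count and now optionally reports the raw one; both call forms used above, side by side:

	int display_count, all_active_disps = 0;

	/* caller only needs the WA-adjusted count */
	display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);

	/* caller also wants the unadjusted active-display count */
	display_count = dcn35_get_active_display_cnt_wa(dc, context,
							&all_active_disps);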
+32 -11
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
··· 279 279 clk_mgr, 280 280 VBIOSSMC_MSG_SetDisplayIdleOptimizations, 281 281 idle_info); 282 - smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info); 282 + smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info); 283 283 } 284 284 285 285 void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) ··· 298 298 clk_mgr, 299 299 VBIOSSMC_MSG_SetDisplayIdleOptimizations, 300 300 idle_info.data); 301 - smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0); 301 + smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); 302 302 } 303 303 304 304 void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) ··· 310 310 clk_mgr, 311 311 VBIOSSMC_MSG_UpdatePmeRestore, 312 312 0); 313 + smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__); 313 314 } 314 315 315 316 void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) ··· 351 350 352 351 void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) 353 352 { 354 - unsigned int msg_id, param; 353 + unsigned int msg_id, param, retv; 355 354 356 355 if (!clk_mgr->smu_present) 357 356 return; ··· 361 360 case DCN_ZSTATE_SUPPORT_ALLOW: 362 361 msg_id = VBIOSSMC_MSG_AllowZstatesEntry; 363 362 param = (1 << 10) | (1 << 9) | (1 << 8); 363 + smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW, param = %d\n", __func__, param); 364 364 break; 365 365 366 366 case DCN_ZSTATE_SUPPORT_DISALLOW: 367 367 msg_id = VBIOSSMC_MSG_AllowZstatesEntry; 368 368 param = 0; 369 + smu_print("%s: SMC_MSG_AllowZstatesEntr msg_id = DISALLOW, param = %d\n", __func__, param); 369 370 break; 370 371 371 372 372 373 case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: 373 374 msg_id = VBIOSSMC_MSG_AllowZstatesEntry; 374 375 param = (1 << 10); 376 + smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param); 375 377 break; 376 378 377 379 case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: 378 380 msg_id = VBIOSSMC_MSG_AllowZstatesEntry; 379 381 param = (1 << 10) | (1 << 8); 382 + smu_print("%s: SMC_MSG_AllowZstatesEntr msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param); 380 383 break; 381 384 382 385 case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: 383 386 msg_id = VBIOSSMC_MSG_AllowZstatesEntry; 384 387 param = (1 << 8); 388 + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param); 385 389 break; 386 390 387 391 default: //DCN_ZSTATE_SUPPORT_UNKNOWN ··· 396 390 } 397 391 398 392 399 - dcn35_smu_send_msg_with_param( 393 + retv = dcn35_smu_send_msg_with_param( 400 394 clk_mgr, 401 395 msg_id, 402 396 param); 403 - smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param); 397 + smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv); 404 398 } 405 399 406 400 int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) ··· 414 408 VBIOSSMC_MSG_GetDprefclkFreq, 415 409 0); 416 410 417 - smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk); 411 + smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk); 418 412 return dprefclk * 1000; 419 413 } 420 414 ··· 429 423 VBIOSSMC_MSG_GetDtbclkFreq, 430 424 0); 431 425 432 - smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk); 426 + smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk); 433 427 return dtbclk * 1000; 434 428 } 435 429 /* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. 
when it is on, it is 600MHZ */ ··· 442 436 clk_mgr, 443 437 VBIOSSMC_MSG_SetDtbClk, 444 438 enable); 445 - smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0); 439 + smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0); 446 440 } 447 441 448 442 void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) ··· 451 445 clk_mgr, 452 446 VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, 453 447 enable); 448 + smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); 454 449 } 455 450 456 451 int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) 457 452 { 458 - return dcn35_smu_send_msg_with_param( 453 + int retv; 454 + 455 + retv = dcn35_smu_send_msg_with_param( 459 456 clk_mgr, 460 457 VBIOSSMC_MSG_DispPsrExit, 461 458 0); 459 + smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); 460 + return retv; 462 461 } 463 462 464 463 int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) 465 464 { 466 - return dcn35_smu_send_msg_with_param( 465 + int retv; 466 + 467 + retv = dcn35_smu_send_msg_with_param( 467 468 clk_mgr, 468 469 VBIOSSMC_MSG_QueryIPS2Support, 469 470 0); 471 + 472 + //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); 473 + return retv; 470 474 } 471 475 472 476 void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) 473 477 { 474 478 REG_WRITE(MP1_SMN_C2PMSG_71, param); 479 + //smu_print("%s: write_ips_scratch = %x\n", __func__, param); 475 480 } 476 481 477 482 uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) 478 483 { 479 - return REG_READ(MP1_SMN_C2PMSG_71); 484 + uint32_t retv; 485 + 486 + retv = REG_READ(MP1_SMN_C2PMSG_71); 487 + //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); 488 + return retv; 480 489 }
+6 -12
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 1521 1521 1522 1522 } 1523 1523 1524 - /* remove any other pipes that are already been synced */ 1524 + /* remove any other unblanked pipes as they have already been synced */ 1525 1525 if (dc->config.use_pipe_ctx_sync_logic) { 1526 1526 /* check pipe's syncd to decide which pipe to be removed */ 1527 1527 for (j = 1; j < group_size; j++) { ··· 1534 1534 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1535 1535 } 1536 1536 } else { 1537 + /* remove any other pipes by checking valid plane */ 1537 1538 for (j = j + 1; j < group_size; j++) { 1538 1539 bool is_blanked; 1539 1540 ··· 2259 2258 { 2260 2259 int i, j; 2261 2260 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2262 - #ifdef CONFIG_DRM_AMD_DC_FP 2263 - struct dml2_context *dml2 = NULL; 2264 - #endif 2265 2261 2266 2262 if (!new_ctx) 2267 2263 return NULL; 2268 2264 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2269 2265 2270 2266 #ifdef CONFIG_DRM_AMD_DC_FP 2271 - if (new_ctx->bw_ctx.dml2) { 2272 - dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); 2273 - if (!dml2) 2274 - return NULL; 2275 - 2276 - memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context)); 2277 - new_ctx->bw_ctx.dml2 = dml2; 2278 - } 2267 + if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) { 2268 + dc_release_state(new_ctx); 2269 + return NULL; 2270 + } 2279 2271 #endif 2280 2272 2281 2273 for (i = 0; i < MAX_PIPES; i++) {
+3 -1
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 4773 4773 option = DITHER_OPTION_SPATIAL8; 4774 4774 break; 4775 4775 case COLOR_DEPTH_101010: 4776 - option = DITHER_OPTION_SPATIAL10; 4776 + option = DITHER_OPTION_TRUN10; 4777 4777 break; 4778 4778 default: 4779 4779 option = DITHER_OPTION_DISABLE; ··· 4799 4799 option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { 4800 4800 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; 4801 4801 fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; 4802 + if (option == DITHER_OPTION_TRUN10) 4803 + fmt_bit_depth->flags.TRUNCATE_MODE = 1; 4802 4804 } 4803 4805 4804 4806 /* special case - Formatter can only reduce by 4 bits at most.
+68 -12
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 467 467 struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 468 468 dwb->otg_inst = stream_status->primary_otg_inst; 469 469 } 470 + 471 + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 472 + dm_error("DC: update_bandwidth failed!\n"); 473 + return false; 474 + } 475 + 476 + /* enable writeback */ 477 + if (dc->hwss.enable_writeback) { 478 + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 479 + 480 + if (dwb->funcs->is_enabled(dwb)) { 481 + /* writeback pipe already enabled, only need to update */ 482 + dc->hwss.update_writeback(dc, wb_info, dc->current_state); 483 + } else { 484 + /* Enable writeback pipe from scratch*/ 485 + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); 486 + } 487 + } 488 + 489 + return true; 490 + } 491 + 492 + bool dc_stream_fc_disable_writeback(struct dc *dc, 493 + struct dc_stream_state *stream, 494 + uint32_t dwb_pipe_inst) 495 + { 496 + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; 497 + 498 + if (stream == NULL) { 499 + dm_error("DC: dc_stream is NULL!\n"); 500 + return false; 501 + } 502 + 503 + if (dwb_pipe_inst >= MAX_DWB_PIPES) { 504 + dm_error("DC: writeback pipe is invalid!\n"); 505 + return false; 506 + } 507 + 508 + if (stream->num_wb_info > MAX_DWB_PIPES) { 509 + dm_error("DC: num_wb_info is invalid!\n"); 510 + return false; 511 + } 512 + 513 + if (dwb->funcs->set_fc_enable) 514 + dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); 515 + 470 516 return true; 471 517 } 472 518 ··· 536 490 return false; 537 491 } 538 492 539 - // stream->writeback_info[dwb_pipe_inst].wb_enabled = false; 540 - for (i = 0; i < stream->num_wb_info; i++) { 541 - /*dynamic update*/ 542 - if (stream->writeback_info[i].wb_enabled && 543 - stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) { 544 - stream->writeback_info[i].wb_enabled = false; 545 - } 546 - } 547 - 548 493 /* remove writeback info for disabled writeback pipes from stream */ 549 494 for (i = 0, j = 0; i < stream->num_wb_info; i++) { 550 495 if (stream->writeback_info[i].wb_enabled) { 551 - if (j < i) 552 - /* trim the array */ 496 + 497 + if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) 498 + stream->writeback_info[i].wb_enabled = false; 499 + 500 + /* trim the array */ 501 + if (j < i) { 553 502 memcpy(&stream->writeback_info[j], &stream->writeback_info[i], 554 503 sizeof(struct dc_writeback_info)); 555 - j++; 504 + j++; 505 + } 556 506 } 557 507 } 558 508 stream->num_wb_info = j; 509 + 510 + /* recalculate and apply DML parameters */ 511 + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 512 + dm_error("DC: update_bandwidth failed!\n"); 513 + return false; 514 + } 515 + 516 + /* disable writeback */ 517 + if (dc->hwss.disable_writeback) { 518 + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; 519 + 520 + if (dwb->funcs->is_enabled(dwb)) 521 + dc->hwss.disable_writeback(dc, dwb_pipe_inst); 522 + } 559 523 560 524 return true; 561 525 }
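Read together with the DM hunks in amdgpu_dm.c, a writeback session moves through the dc_stream API roughly as sketched below. Note that this commit's DM side only calls add/remove; dc_stream_fc_disable_writeback() is the new hook for halting frame capture ahead of teardown, and the ordering shown is an assumption:

	/* enable: populate a struct dc_writeback_info, then hand it to DC */
	dc_stream_add_writeback(dc, stream, wb_info);

	/* disable: stop frame capture first (optional), then drop the pipe */
	dc_stream_fc_disable_writeback(dc, stream, 0 /* dwb_pipe_inst */);
	dc_stream_remove_writeback(dc, stream, 0 /* dwb_pipe_inst */);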
+8 -2
drivers/gpu/drm/amd/display/dc/dc.h
··· 49 49 struct set_config_cmd_payload; 50 50 struct dmub_notification; 51 51 52 - #define DC_VER "3.2.262" 52 + #define DC_VER "3.2.263" 53 53 54 54 #define MAX_SURFACES 3 55 55 #define MAX_PLANES 6 ··· 1541 1541 bool is_dig_mapping_flexible; 1542 1542 bool hpd_status; /* HPD status of link without physical HPD pin. */ 1543 1543 bool is_hpd_pending; /* Indicates a new received hpd */ 1544 - bool is_automated; /* Indicates automated testing */ 1544 + 1545 + /* USB4 DPIA links skip verifying link cap, instead performing the fallback method 1546 + * for every link training. This is incompatible with DP LL compliance automation, 1547 + * which expects the same link settings to be used every retry on a link loss. 1548 + * This flag is used to skip the fallback when link loss occurs during automation. 1549 + */ 1550 + bool skip_fallback_on_link_loss; 1545 1551 1546 1552 bool edp_sink_present; 1547 1553
+1 -1
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
··· 140 140 enum bp_result (*enable_lvtma_control)( 141 141 struct dc_bios *bios, 142 142 uint8_t uc_pwr_on, 143 - uint8_t panel_instance, 143 + uint8_t pwrseq_instance, 144 144 uint8_t bypass_panel_control_wait); 145 145 146 146 enum bp_result (*get_soc_bb_info)(
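With the rename threaded through bios_parser2.c, command_table2.c/.h and this vtable, callers now pass the panel's power-sequencer instance instead of a panel instance. A hedged call-site sketch; the uc_pwr_on encoding and the pwrseq_inst source are assumptions, not shown in this excerpt:

	struct dc_bios *dcb = link->ctx->dc_bios;

	if (dcb->funcs->enable_lvtma_control)
		dcb->funcs->enable_lvtma_control(dcb,
						 1, /* uc_pwr_on (assumed: power up) */
						 link->panel_cntl->pwrseq_inst,
						 0  /* bypass_panel_control_wait */);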
+1
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 465 465 struct fixed31_32 v_scale_ratio; 466 466 enum dc_rotation_angle rotation; 467 467 bool mirror; 468 + struct dc_stream_state *stream; 468 469 }; 469 470 470 471 /* IPP related types */
+4
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 454 454 struct dc_stream_state *stream, 455 455 struct dc_writeback_info *wb_info); 456 456 457 + bool dc_stream_fc_disable_writeback(struct dc *dc, 458 + struct dc_stream_state *stream, 459 + uint32_t dwb_pipe_inst); 460 + 457 461 bool dc_stream_remove_writeback(struct dc *dc, 458 462 struct dc_stream_state *stream, 459 463 uint32_t dwb_pipe_inst);
+6 -2
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
··· 145 145 return ret; 146 146 } 147 147 148 - static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) 148 + static bool dmub_abm_set_pipe_ex(struct abm *abm, 149 + uint32_t otg_inst, 150 + uint32_t option, 151 + uint32_t panel_inst, 152 + uint32_t pwrseq_inst) 149 153 { 150 154 bool ret = false; 151 155 unsigned int feature_support; ··· 157 153 feature_support = abm_feature_support(abm, panel_inst); 158 154 159 155 if (feature_support == ABM_LCD_SUPPORT) 160 - ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst); 156 + ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst); 161 157 162 158 return ret; 163 159 }
+6 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
··· 254 254 return true; 255 255 } 256 256 257 - bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) 257 + bool dmub_abm_set_pipe(struct abm *abm, 258 + uint32_t otg_inst, 259 + uint32_t option, 260 + uint32_t panel_inst, 261 + uint32_t pwrseq_inst) 258 262 { 259 263 union dmub_rb_cmd cmd; 260 264 struct dc_context *dc = abm->ctx; ··· 268 264 cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; 269 265 cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; 270 266 cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; 267 + cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst; 271 268 cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; 272 269 cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; 273 270 cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+1 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
··· 44 44 struct dc_context *dc, 45 45 unsigned int panel_inst, 46 46 struct abm_save_restore *pData); 47 - bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); 47 + bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst); 48 48 bool dmub_abm_set_backlight_level(struct abm *abm, 49 49 unsigned int backlight_pwm_u16_16, 50 50 unsigned int frame_ramp,
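A hedged example of the widened ABM hook; the point of the extra argument is that the backlight panel instance and the power-sequencer instance may differ, so both now travel explicitly. All values below are placeholders; only the signature comes from the patch:

	uint32_t otg_inst = 0, option = 1, panel_inst = 0, pwrseq_inst = 0;

	if (!dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst))
		DC_LOG_WARNING("ABM set_pipe failed\n");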
+32
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
··· 296 296 type DTBCLK_P1_GATE_DISABLE;\ 297 297 type DTBCLK_P2_GATE_DISABLE;\ 298 298 type DTBCLK_P3_GATE_DISABLE;\ 299 + type DSCCLK0_ROOT_GATE_DISABLE;\ 300 + type DSCCLK1_ROOT_GATE_DISABLE;\ 301 + type DSCCLK2_ROOT_GATE_DISABLE;\ 302 + type DSCCLK3_ROOT_GATE_DISABLE;\ 303 + type SYMCLKA_FE_ROOT_GATE_DISABLE;\ 304 + type SYMCLKB_FE_ROOT_GATE_DISABLE;\ 305 + type SYMCLKC_FE_ROOT_GATE_DISABLE;\ 306 + type SYMCLKD_FE_ROOT_GATE_DISABLE;\ 307 + type SYMCLKE_FE_ROOT_GATE_DISABLE;\ 308 + type DPPCLK0_ROOT_GATE_DISABLE;\ 309 + type DPPCLK1_ROOT_GATE_DISABLE;\ 310 + type DPPCLK2_ROOT_GATE_DISABLE;\ 311 + type DPPCLK3_ROOT_GATE_DISABLE;\ 312 + type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\ 313 + type SYMCLKA_ROOT_GATE_DISABLE;\ 314 + type SYMCLKB_ROOT_GATE_DISABLE;\ 315 + type SYMCLKC_ROOT_GATE_DISABLE;\ 316 + type SYMCLKD_ROOT_GATE_DISABLE;\ 317 + type SYMCLKE_ROOT_GATE_DISABLE;\ 318 + type PHYA_REFCLK_ROOT_GATE_DISABLE;\ 319 + type PHYB_REFCLK_ROOT_GATE_DISABLE;\ 320 + type PHYC_REFCLK_ROOT_GATE_DISABLE;\ 321 + type PHYD_REFCLK_ROOT_GATE_DISABLE;\ 322 + type PHYE_REFCLK_ROOT_GATE_DISABLE;\ 323 + type DPSTREAMCLK0_ROOT_GATE_DISABLE;\ 324 + type DPSTREAMCLK1_ROOT_GATE_DISABLE;\ 325 + type DPSTREAMCLK2_ROOT_GATE_DISABLE;\ 326 + type DPSTREAMCLK3_ROOT_GATE_DISABLE;\ 327 + type DPSTREAMCLK0_GATE_DISABLE;\ 328 + type DPSTREAMCLK1_GATE_DISABLE;\ 329 + type DPSTREAMCLK2_GATE_DISABLE;\ 330 + type DPSTREAMCLK3_GATE_DISABLE;\ 299 331 300 332 struct dccg_shift { 301 333 DCCG_REG_FIELD_LIST(uint8_t)
+10 -2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
··· 1077 1077 if (src_y_offset < 0) 1078 1078 src_y_offset = 0; 1079 1079 /* Save necessary cursor info x, y position. w, h is saved in attribute func. */ 1080 - hubp->cur_rect.x = src_x_offset + param->viewport.x; 1081 - hubp->cur_rect.y = src_y_offset + param->viewport.y; 1080 + if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 1081 + param->rotation != ROTATION_ANGLE_0) { 1082 + hubp->cur_rect.x = 0; 1083 + hubp->cur_rect.y = 0; 1084 + hubp->cur_rect.w = param->stream->timing.h_addressable; 1085 + hubp->cur_rect.h = param->stream->timing.v_addressable; 1086 + } else { 1087 + hubp->cur_rect.x = src_x_offset + param->viewport.x; 1088 + hubp->cur_rect.y = src_y_offset + param->viewport.y; 1089 + } 1082 1090 } 1083 1091 1084 1092 void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+23
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
··· 130 130 return true; 131 131 } 132 132 133 + void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) 134 + { 135 + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); 136 + unsigned int pre_locked; 137 + 138 + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); 139 + 140 + /* Lock DWB registers */ 141 + if (pre_locked == 0) 142 + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); 143 + 144 + /* Disable FC */ 145 + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); 146 + 147 + /* Unlock DWB registers */ 148 + if (pre_locked == 0) 149 + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); 150 + 151 + DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst); 152 + } 153 + 154 + 133 155 bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) 134 156 { 135 157 struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); ··· 248 226 .disable = dwb3_disable, 249 227 .update = dwb3_update, 250 228 .is_enabled = dwb3_is_enabled, 229 + .set_fc_enable = dwb3_set_fc_enable, 251 230 .set_stereo = dwb3_set_stereo, 252 231 .set_new_content = dwb3_set_new_content, 253 232 .dwb_program_output_csc = NULL,
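dc_stream_fc_disable_writeback() in dc_stream.c reaches this implementation through the dwbc vtable; the guarded-call shape, using the existing dwb_frame_capture_enable values, is:

	struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];

	if (dwb->funcs->set_fc_enable)
		dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);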
+2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
··· 877 877 878 878 bool dwb3_is_enabled(struct dwbc *dwbc); 879 879 880 + void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); 881 + 880 882 void dwb3_set_stereo(struct dwbc *dwbc, 881 883 struct dwb_stereo_params *stereo_params); 882 884
+3
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
··· 243 243 return false; 244 244 } 245 245 246 + if (params->hw_points_num == 0) 247 + return false; 248 + 246 249 REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); 247 250 248 251 current_mode = dwb3_get_ogam_current(dwbc30);
+3 -2
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
··· 50 50 cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; 51 51 cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO; 52 52 cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); 53 - cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; 53 + cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst; 54 54 55 55 return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 56 56 } ··· 78 78 cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; 79 79 cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT; 80 80 cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data); 81 - cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst; 81 + cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst; 82 82 cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL; 83 83 cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL; 84 84 cmd.panel_cntl.data.bl_pwm_ref_div1 = ··· 157 157 dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs; 158 158 dcn31_panel_cntl->base.ctx = init_data->ctx; 159 159 dcn31_panel_cntl->base.inst = init_data->inst; 160 + dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst; 160 161 }
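Construction-side view of the same rename: the panel_cntl init data now carries a pwrseq_inst that dcn31_panel_cntl_construct() copies onto the base object. A caller sketch with placeholder values:

	struct panel_cntl_init_data init_data = {
		.ctx = ctx,
		.inst = panel_inst,
		.pwrseq_inst = pwrseq_inst, /* consumed by the DMUB panel/LVTMA cmds */
	};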
+2 -1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
··· 71 71 { 72 72 struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); 73 73 74 + REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on); 75 + 74 76 if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) { 75 77 if (power_on) { 76 78 REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0); 77 79 REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5); 78 80 } else if (!mpc->ctx->dc->debug.disable_mem_low_power) { 79 - ASSERT(false); 80 81 /* TODO: change to mpc 81 82 * dpp_base->ctx->dc->optimized_required = true; 82 83 * dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
+26
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
··· 806 806 807 807 return result; 808 808 } 809 + 810 + void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, 811 + display_e2e_pipe_params_st *pipes) 812 + { 813 + int i, pipe_cnt; 814 + struct resource_context *res_ctx = &context->res_ctx; 815 + struct pipe_ctx *pipe = NULL; 816 + 817 + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { 818 + int odm_slice_count = 0; 819 + 820 + if (!res_ctx->pipe_ctx[i].stream) 821 + continue; 822 + pipe = &res_ctx->pipe_ctx[i]; 823 + odm_slice_count = resource_get_odm_slice_count(pipe); 824 + 825 + if (odm_slice_count == 1) 826 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; 827 + else if (odm_slice_count == 2) 828 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; 829 + else if (odm_slice_count == 4) 830 + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; 831 + 832 + pipe_cnt++; 833 + } 834 + }
+60 -2
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
··· 506 506 dccg->dpp_clock_gated[dpp_inst] = !clock_on; 507 507 } 508 508 509 + static void dccg35_disable_symclk32_se( 510 + struct dccg *dccg, 511 + int hpo_se_inst) 512 + { 513 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 514 + 515 + /* set refclk as the source for symclk32_se */ 516 + switch (hpo_se_inst) { 517 + case 0: 518 + REG_UPDATE_2(SYMCLK32_SE_CNTL, 519 + SYMCLK32_SE0_SRC_SEL, 0, 520 + SYMCLK32_SE0_EN, 0); 521 + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { 522 + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 523 + SYMCLK32_SE0_GATE_DISABLE, 0); 524 + // REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 525 + // SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); 526 + } 527 + break; 528 + case 1: 529 + REG_UPDATE_2(SYMCLK32_SE_CNTL, 530 + SYMCLK32_SE1_SRC_SEL, 0, 531 + SYMCLK32_SE1_EN, 0); 532 + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { 533 + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 534 + SYMCLK32_SE1_GATE_DISABLE, 0); 535 + // REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 536 + // SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); 537 + } 538 + break; 539 + case 2: 540 + REG_UPDATE_2(SYMCLK32_SE_CNTL, 541 + SYMCLK32_SE2_SRC_SEL, 0, 542 + SYMCLK32_SE2_EN, 0); 543 + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { 544 + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 545 + SYMCLK32_SE2_GATE_DISABLE, 0); 546 + // REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 547 + // SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); 548 + } 549 + break; 550 + case 3: 551 + REG_UPDATE_2(SYMCLK32_SE_CNTL, 552 + SYMCLK32_SE3_SRC_SEL, 0, 553 + SYMCLK32_SE3_EN, 0); 554 + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { 555 + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 556 + SYMCLK32_SE3_GATE_DISABLE, 0); 557 + // REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, 558 + // SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); 559 + } 560 + break; 561 + default: 562 + BREAK_TO_DEBUGGER(); 563 + return; 564 + } 565 + } 566 + 509 567 void dccg35_init(struct dccg *dccg) 510 568 { 511 569 int otg_inst; ··· 572 514 * will cause DCN to hang. 573 515 */ 574 516 for (otg_inst = 0; otg_inst < 4; otg_inst++) 575 - dccg31_disable_symclk32_se(dccg, otg_inst); 517 + dccg35_disable_symclk32_se(dccg, otg_inst); 576 518 577 519 if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) 578 520 for (otg_inst = 0; otg_inst < 2; otg_inst++) ··· 846 788 .dccg_init = dccg35_init, 847 789 .set_dpstreamclk = dccg35_set_dpstreamclk, 848 790 .enable_symclk32_se = dccg31_enable_symclk32_se, 849 - .disable_symclk32_se = dccg31_disable_symclk32_se, 791 + .disable_symclk32_se = dccg35_disable_symclk32_se, 850 792 .enable_symclk32_le = dccg31_enable_symclk32_le, 851 793 .disable_symclk32_le = dccg31_disable_symclk32_le, 852 794 .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
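dccg35_init() above switches all four stream-encoder instances to the DCN35-local variant, and the same function is exported through the dccg vtable, so external callers pick it up transparently; a sketch:

	/* resolves to dccg35_disable_symclk32_se() on DCN35 */
	dccg->funcs->disable_symclk32_se(dccg, hpo_se_inst);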
+51
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
··· 34 34 #define DCCG_REG_LIST_DCN35() \ 35 35 DCCG_REG_LIST_DCN314(),\ 36 36 SR(DPPCLK_CTRL),\ 37 + SR(DCCG_GATE_DISABLE_CNTL4),\ 37 38 SR(DCCG_GATE_DISABLE_CNTL5),\ 38 39 SR(DCCG_GATE_DISABLE_CNTL6),\ 39 40 SR(DCCG_GLOBAL_FGCG_REP_CNTL),\ ··· 181 180 DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ 182 181 DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ 183 182 DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ 183 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\ 184 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\ 185 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\ 186 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\ 187 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\ 188 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\ 189 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\ 190 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\ 191 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\ 192 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\ 193 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\ 194 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\ 195 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\ 196 + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\ 197 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\ 198 + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ 199 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\ 200 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\ 201 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\ 202 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\ 203 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\ 204 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ 205 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\ 206 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ 207 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ 208 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ 209 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ 210 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\ 211 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\ 212 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\ 213 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\ 214 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\ 215 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\ 216 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ 217 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ 218 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ 219 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ 220 + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ 221 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, 
mask_sh),\ 222 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\ 223 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\ 224 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\ 225 + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ 226 + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ 227 + DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\ 228 + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\ 229 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ 230 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ 231 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ 232 + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ 184 233 185 234 struct dccg *dccg35_create( 186 235 struct dc_context *ctx,
+17 -12
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
··· 3542 3542 { 3543 3543 struct vba_vars_st *v = &mode_lib->vba; 3544 3544 int MinPrefetchMode, MaxPrefetchMode; 3545 - int i; 3545 + int i, start_state; 3546 3546 unsigned int j, k, m; 3547 3547 bool EnoughWritebackUnits = true; 3548 3548 bool WritebackModeSupport = true; ··· 3552 3552 bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX] = { 0 }; 3553 3553 3554 3554 /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ 3555 + 3556 + if (mode_lib->validate_max_state) 3557 + start_state = v->soc.num_states - 1; 3558 + else 3559 + start_state = 0; 3555 3560 3556 3561 CalculateMinAndMaxPrefetchMode( 3557 3562 mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, ··· 3856 3851 v->SingleDPPViewportSizeSupportPerPlane, 3857 3852 &v->ViewportSizeSupport[0][0]); 3858 3853 3859 - for (i = 0; i < v->soc.num_states; i++) { 3854 + for (i = start_state; i < v->soc.num_states; i++) { 3860 3855 for (j = 0; j < 2; j++) { 3861 3856 v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed); 3862 3857 v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed); ··· 4012 4007 4013 4008 /*Total Available Pipes Support Check*/ 4014 4009 4015 - for (i = 0; i < v->soc.num_states; i++) { 4010 + for (i = start_state; i < v->soc.num_states; i++) { 4016 4011 for (j = 0; j < 2; j++) { 4017 4012 if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) { 4018 4013 v->TotalAvailablePipesSupport[i][j] = true; ··· 4051 4046 } 4052 4047 } 4053 4048 4054 - for (i = 0; i < v->soc.num_states; i++) { 4049 + for (i = start_state; i < v->soc.num_states; i++) { 4055 4050 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { 4056 4051 v->RequiresDSC[i][k] = false; 4057 4052 v->RequiresFEC[i][k] = false; ··· 4179 4174 } 4180 4175 } 4181 4176 } 4182 - for (i = 0; i < v->soc.num_states; i++) { 4177 + for (i = start_state; i < v->soc.num_states; i++) { 4183 4178 v->DIOSupport[i] = true; 4184 4179 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { 4185 4180 if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi) ··· 4190 4185 } 4191 4186 } 4192 4187 4193 - for (i = 0; i < v->soc.num_states; ++i) { 4188 + for (i = start_state; i < v->soc.num_states; ++i) { 4194 4189 v->ODMCombine4To1SupportCheckOK[i] = true; 4195 4190 for (k = 0; k < v->NumberOfActivePlanes; ++k) { 4196 4191 if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 ··· 4202 4197 4203 4198 /* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */ 4204 4199 4205 - for (i = 0; i < v->soc.num_states; i++) { 4200 + for (i = start_state; i < v->soc.num_states; i++) { 4206 4201 v->NotEnoughDSCUnits[i] = false; 4207 4202 v->TotalDSCUnitsRequired = 0.0; 4208 4203 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { ··· 4222 4217 } 4223 4218 /*DSC Delay per state*/ 4224 4219 4225 - for (i = 0; i < v->soc.num_states; i++) { 4220 + for (i = start_state; i < v->soc.num_states; i++) { 4226 4221 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { 4227 4222 if (v->OutputBppPerState[i][k] == BPP_INVALID) { 4228 4223 v->BPP = 0.0; ··· 4338 4333 v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k]; 4339 4334 } 4340 4335 4341 - for (i = 0; i < v->soc.num_states; i++) { 4336 + for (i = start_state; i < v->soc.num_states; i++) { 4342 4337 for 
(j = 0; j < 2; j++) { 4343 4338 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { 4344 4339 v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k]; ··· 5080 5075 5081 5076 /*PTE Buffer Size Check*/ 5082 5077 5083 - for (i = 0; i < v->soc.num_states; i++) { 5078 + for (i = start_state; i < v->soc.num_states; i++) { 5084 5079 for (j = 0; j < 2; j++) { 5085 5080 v->PTEBufferSizeNotExceeded[i][j] = true; 5086 5081 for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { ··· 5141 5136 } 5142 5137 /*Mode Support, Voltage State and SOC Configuration*/ 5143 5138 5144 - for (i = v->soc.num_states - 1; i >= 0; i--) { 5139 + for (i = v->soc.num_states - 1; i >= start_state; i--) { 5145 5140 for (j = 0; j < 2; j++) { 5146 5141 if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1 5147 5142 && v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1 ··· 5163 5158 } 5164 5159 { 5165 5160 unsigned int MaximumMPCCombine = 0; 5166 - for (i = v->soc.num_states; i >= 0; i--) { 5161 + for (i = v->soc.num_states; i >= start_state; i--) { 5167 5162 if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) { 5168 5163 v->VoltageLevel = i; 5169 5164 v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
+3
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 2192 2192 int i; 2193 2193 2194 2194 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); 2195 + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); 2195 2196 2196 2197 /* repopulate_pipes = 1 means the pipes were either split or merged. In this case 2197 2198 * we have to re-calculate the DET allocation and run through DML once more to ··· 2201 2200 * */ 2202 2201 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = 2203 2202 dm_prefetch_support_uclk_fclk_and_stutter_if_possible; 2203 + 2204 2204 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 2205 + 2205 2206 if (vlevel == context->bw_ctx.dml.soc.num_states) { 2206 2207 /* failed after DET size changes */ 2207 2208 goto validate_fail;
+19
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 326 326 dcn3_5_soc.dram_clock_change_latency_us = 327 327 dc->debug.dram_clock_change_latency_ns / 1000.0; 328 328 } 329 + 330 + if (dc->bb_overrides.dram_clock_change_latency_ns > 0) 331 + dcn3_5_soc.dram_clock_change_latency_us = 332 + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; 333 + 334 + if (dc->bb_overrides.sr_exit_time_ns > 0) 335 + dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; 336 + 337 + if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) 338 + dcn3_5_soc.sr_enter_plus_exit_time_us = 339 + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; 340 + 341 + if (dc->bb_overrides.sr_exit_z8_time_ns > 0) 342 + dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; 343 + 344 + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) 345 + dcn3_5_soc.sr_enter_plus_exit_z8_time_us = 346 + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; 347 + 329 348 /*temp till dml2 fully work without dml1*/ 330 349 dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, 331 350 DML_PROJECT_DCN31);
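Each new override follows the same guard-and-convert shape: replace the bounding-box value only when a positive override exists, converting nanoseconds to microseconds. Extracted as a sketch (field names are placeholders):

#include <stdio.h>

/* Apply an ns-granularity override to a us-granularity SoC parameter. */
static void apply_override_us(double *soc_param_us, int override_ns)
{
    if (override_ns > 0)
        *soc_param_us = override_ns / 1000.0;
}

int main(void)
{
    double sr_exit_time_us = 5.0;              /* bounding-box default */
    apply_override_us(&sr_exit_time_us, 8800); /* 8800 ns -> 8.8 us */
    apply_override_us(&sr_exit_time_us, 0);    /* zero/negative: no change */
    printf("sr_exit_time_us = %.1f\n", sr_exit_time_us);
    return 0;
}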
+3 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 341 341 break; 342 342 } 343 343 344 + if (dml2->config.bbox_overrides.clks_table.num_states) 345 + p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; 346 + 344 347 /* Override from passed values, if available */ 345 348 for (i = 0; i < p->in_states->num_states; i++) { 346 349 if (dml2->config.bbox_overrides.sr_exit_latency_us) { ··· 400 397 } 401 398 /* Copy clocks tables entries, if available */ 402 399 if (dml2->config.bbox_overrides.clks_table.num_states) { 403 - p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; 404 400 405 401 for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) { 406 402 p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
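The point of this hunk is ordering: the "Override from passed values" loop iterates num_states, so applying the clock-table state-count override after that loop (as the removed line did) left the newly added states without the per-state overrides. A reduced demonstration of why the assignment must move first:

#include <stdio.h>

#define MAX_STATES 8

int main(void)
{
    int num_states = 4;      /* default from the IP/SoC table */
    int override_states = 6; /* from bbox_overrides.clks_table */
    double sr_exit_us[MAX_STATES] = { 0 };

    /* Fix: raise the state count before the per-state override loop... */
    if (override_states)
        num_states = override_states;

    /* ...so states 4 and 5 receive the override too. */
    for (int i = 0; i < num_states; i++)
        sr_exit_us[i] = 8.8;

    printf("state 5: %.1f\n", sr_exit_us[5]); /* 8.8; previously stayed 0 */
    return 0;
}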
+11
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
··· 157 157 { 158 158 /* If this assert is hit then we have a link encoder dynamic management issue */ 159 159 ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); 160 + 161 + if (pipe_ctx->stream == NULL) 162 + return false; 163 + /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */ 164 + if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) { 165 + return (pipe_ctx->stream_res.hpo_dp_stream_enc && 166 + pipe_ctx->link_res.hpo_dp_link_enc && 167 + dc_is_dp_signal(pipe_ctx->stream->signal) && 168 + (pipe_ctx->stream->link->remote_sinks[0] == pipe_ctx->stream->sink)); 169 + } 170 + 160 171 return (pipe_ctx->stream_res.hpo_dp_stream_enc && 161 172 pipe_ctx->link_res.hpo_dp_link_enc && 162 173 dc_is_dp_signal(pipe_ctx->stream->signal));
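With the new branch, a daisy-chained MST topology contributes exactly one HPO encoder to the count: only the pipe whose sink is the link's first remote sink reports true. A simplified model of the check (structs cut down to the fields used; the encoder-acquired flag stands in for the stream/link encoder and signal tests):

#include <stdbool.h>
#include <stdio.h>

struct sink { int id; };
struct dc_link { struct sink *remote_sinks[2]; };
struct pipe {
    struct dc_link *link;
    struct sink *sink;
    bool hpo_enc_acquired;
};

/* Count an MST hub once: only the first remote sink "owns" the encoder. */
static bool counts_as_hpo_output(const struct pipe *p)
{
    if (p->link && p->link->remote_sinks[0])
        return p->hpo_enc_acquired && p->sink == p->link->remote_sinks[0];
    return p->hpo_enc_acquired; /* non-MST: original check unchanged */
}

int main(void)
{
    struct sink s0 = { 0 }, s1 = { 1 };
    struct dc_link l = { { &s0, &s1 } };
    struct pipe first = { &l, &s0, true }, second = { &l, &s1, true };

    /* Prints 1 then 0: the hub is counted exactly once. */
    printf("%d %d\n", counts_as_hpo_output(&first), counts_as_hpo_output(&second));
    return 0;
}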
+28 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
··· 691 691 return out; 692 692 } 693 693 694 + static inline struct dml2_context *dml2_allocate_memory(void) 695 + { 696 + return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); 697 + } 698 + 694 699 bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) 695 700 { 696 701 // Allocate Mode Lib Ctx 697 - *dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); 702 + *dml2 = dml2_allocate_memory(); 698 703 699 704 if (!(*dml2)) 700 705 return false; ··· 749 744 { 750 745 *fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0]; 751 746 *dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0]; 747 + } 748 + 749 + void dml2_copy(struct dml2_context *dst_dml2, 750 + struct dml2_context *src_dml2) 751 + { 752 + /* copy Mode Lib Ctx */ 753 + memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context)); 754 + } 755 + 756 + bool dml2_create_copy(struct dml2_context **dst_dml2, 757 + struct dml2_context *src_dml2) 758 + { 759 + /* Allocate Mode Lib Ctx */ 760 + *dst_dml2 = dml2_allocate_memory(); 761 + 762 + if (!(*dst_dml2)) 763 + return false; 764 + 765 + /* copy Mode Lib Ctx */ 766 + dml2_copy(*dst_dml2, src_dml2); 767 + 768 + return true; 752 769 }
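dml2_copy() is a flat memcpy of the whole context, so the new helper pair only works as long as dml2_context holds no pointers back into itself. A plausible caller pattern, sketched in user space (allocation and types are stand-ins, and the snapshot is assumed to be released by the caller):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct ctx { int state[16]; }; /* stand-in for struct dml2_context */

static struct ctx *ctx_alloc(void) { return calloc(1, sizeof(struct ctx)); }

static void ctx_copy(struct ctx *dst, const struct ctx *src)
{
    memcpy(dst, src, sizeof(*dst)); /* same flat copy dml2_copy() performs */
}

static bool ctx_create_copy(struct ctx **dst, const struct ctx *src)
{
    *dst = ctx_alloc();
    if (!*dst)
        return false;
    ctx_copy(*dst, src);
    return true;
}

int main(void)
{
    struct ctx *live = ctx_alloc(), *snapshot = NULL;

    if (!live || !ctx_create_copy(&snapshot, live))
        return 1;
    /* ... mutate "live" during validation, roll back if it fails ... */
    ctx_copy(live, snapshot);
    free(snapshot);
    free(live);
    return 0;
}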
+4
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
··· 191 191 struct dml2_context **dml2); 192 192 193 193 void dml2_destroy(struct dml2_context *dml2); 194 + void dml2_copy(struct dml2_context *dst_dml2, 195 + struct dml2_context *src_dml2); 196 + bool dml2_create_copy(struct dml2_context **dst_dml2, 197 + struct dml2_context *src_dml2); 194 198 195 199 /* 196 200 * dml2_validate - Determines if a display configuration is supported or not.
+8 -8
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 790 790 struct dc_context *ctx = link->ctx; 791 791 struct bp_transmitter_control cntl = { 0 }; 792 792 enum bp_result bp_result; 793 - uint8_t panel_instance; 793 + uint8_t pwrseq_instance; 794 794 795 795 796 796 if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) ··· 873 873 cntl.coherent = false; 874 874 cntl.lanes_number = LANE_COUNT_FOUR; 875 875 cntl.hpd_sel = link->link_enc->hpd_source; 876 - panel_instance = link->panel_cntl->inst; 876 + pwrseq_instance = link->panel_cntl->pwrseq_inst; 877 877 878 878 if (ctx->dc->ctx->dmub_srv && 879 879 ctx->dc->debug.dmub_command_table) { ··· 881 881 if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) { 882 882 bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, 883 883 LVTMA_CONTROL_POWER_ON, 884 - panel_instance, link->link_powered_externally); 884 + pwrseq_instance, link->link_powered_externally); 885 885 } else { 886 886 bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, 887 887 LVTMA_CONTROL_POWER_OFF, 888 - panel_instance, link->link_powered_externally); 888 + pwrseq_instance, link->link_powered_externally); 889 889 } 890 890 } 891 891 ··· 956 956 { 957 957 struct dc_context *ctx = link->ctx; 958 958 struct bp_transmitter_control cntl = { 0 }; 959 - uint8_t panel_instance; 959 + uint8_t pwrseq_instance; 960 960 unsigned int pre_T11_delay = OLED_PRE_T11_DELAY; 961 961 unsigned int post_T7_delay = OLED_POST_T7_DELAY; 962 962 ··· 1009 1009 */ 1010 1010 /* dc_service_sleep_in_milliseconds(50); */ 1011 1011 /*edp 1.2*/ 1012 - panel_instance = link->panel_cntl->inst; 1012 + pwrseq_instance = link->panel_cntl->pwrseq_inst; 1013 1013 1014 1014 if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) { 1015 1015 if (!link->dc->config.edp_no_power_sequencing) ··· 1034 1034 if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) 1035 1035 ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, 1036 1036 LVTMA_CONTROL_LCD_BLON, 1037 - panel_instance, link->link_powered_externally); 1037 + pwrseq_instance, link->link_powered_externally); 1038 1038 else 1039 1039 ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, 1040 1040 LVTMA_CONTROL_LCD_BLOFF, 1041 - panel_instance, link->link_powered_externally); 1041 + pwrseq_instance, link->link_powered_externally); 1042 1042 } 1043 1043 1044 1044 link_transmitter_control(ctx->dc_bios, &cntl);
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 3417 3417 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, 3418 3418 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, 3419 3419 .rotation = pipe_ctx->plane_state->rotation, 3420 - .mirror = pipe_ctx->plane_state->horizontal_mirror 3420 + .mirror = pipe_ctx->plane_state->horizontal_mirror, 3421 + .stream = pipe_ctx->stream, 3421 3422 }; 3422 3423 bool pipe_split_on = false; 3423 3424 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
+28 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 137 137 pipe_ctx->stream->dpms_off = true; 138 138 } 139 139 140 - static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) 140 + static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, 141 + uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst) 141 142 { 142 143 union dmub_rb_cmd cmd; 143 144 struct dc_context *dc = abm->ctx; ··· 148 147 cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; 149 148 cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; 150 149 cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; 150 + cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst; 151 151 cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; 152 152 cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; 153 153 cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; ··· 181 179 struct abm *abm = pipe_ctx->stream_res.abm; 182 180 uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; 183 181 struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; 184 - 185 182 struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; 186 183 187 184 if (dmcu) { ··· 191 190 if (abm && panel_cntl) { 192 191 if (abm->funcs && abm->funcs->set_pipe_ex) { 193 192 abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, 194 - panel_cntl->inst); 193 + panel_cntl->inst, panel_cntl->pwrseq_inst); 195 194 } else { 196 - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst); 195 + dmub_abm_set_pipe(abm, 196 + otg_inst, 197 + SET_ABM_PIPE_IMMEDIATELY_DISABLE, 198 + panel_cntl->inst, 199 + panel_cntl->pwrseq_inst); 197 200 } 198 201 panel_cntl->funcs->store_backlight_level(panel_cntl); 199 202 } ··· 217 212 218 213 if (abm && panel_cntl) { 219 214 if (abm->funcs && abm->funcs->set_pipe_ex) { 220 - abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); 215 + abm->funcs->set_pipe_ex(abm, 216 + otg_inst, 217 + SET_ABM_PIPE_NORMAL, 218 + panel_cntl->inst, 219 + panel_cntl->pwrseq_inst); 221 220 } else { 222 - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); 221 + dmub_abm_set_pipe(abm, otg_inst, 222 + SET_ABM_PIPE_NORMAL, 223 + panel_cntl->inst, 224 + panel_cntl->pwrseq_inst); 223 225 } 224 226 } 225 227 } ··· 249 237 250 238 if (abm && panel_cntl) { 251 239 if (abm->funcs && abm->funcs->set_pipe_ex) { 252 - abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); 240 + abm->funcs->set_pipe_ex(abm, 241 + otg_inst, 242 + SET_ABM_PIPE_NORMAL, 243 + panel_cntl->inst, 244 + panel_cntl->pwrseq_inst); 253 245 } else { 254 - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); 246 + dmub_abm_set_pipe(abm, 247 + otg_inst, 248 + SET_ABM_PIPE_NORMAL, 249 + panel_cntl->inst, 250 + panel_cntl->pwrseq_inst); 255 251 } 256 252 } 257 253 }
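All three call sites above follow the same optional-hook dispatch, now with pwrseq_inst threaded through both paths: prefer the ABM ops-table override when present, otherwise fall back to the generic DMUB command. The dispatch shape, reduced to a self-contained sketch (function bodies are mocks):

#include <stdio.h>

struct abm;
struct abm_funcs {
    /* optional hardware-specific hook; may be absent (NULL) */
    void (*set_pipe_ex)(struct abm *abm, unsigned int otg_inst,
                        unsigned int option, unsigned int panel_inst,
                        unsigned int pwrseq_inst);
};
struct abm { const struct abm_funcs *funcs; };

static void dmub_set_pipe(struct abm *abm, unsigned int otg_inst,
                          unsigned int option, unsigned int panel_inst,
                          unsigned int pwrseq_inst)
{
    printf("generic DMUB path: otg=%u pwrseq=%u\n", otg_inst, pwrseq_inst);
}

static void set_pipe(struct abm *abm, unsigned int otg_inst, unsigned int option,
                     unsigned int panel_inst, unsigned int pwrseq_inst)
{
    if (abm->funcs && abm->funcs->set_pipe_ex)
        abm->funcs->set_pipe_ex(abm, otg_inst, option, panel_inst, pwrseq_inst);
    else
        dmub_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst);
}

int main(void)
{
    struct abm_funcs no_hook = { 0 };
    struct abm abm = { &no_hook };

    set_pipe(&abm, 0, 0, 0, 0); /* no hook installed: generic path runs */
    return 0;
}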
+4
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 367 367 DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ 368 368 __func__, wb_info->dwb_pipe_inst,\ 369 369 wb_info->mpcc_inst); 370 + 371 + /* Warmup interface */ 372 + dcn30_mmhubbub_warmup(dc, 1, wb_info); 373 + 370 374 /* Update writeback pipe */ 371 375 dcn30_set_writeback(dc, wb_info, context); 372 376
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
··· 96 96 if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { 97 97 // Power down VPGs 98 98 for (i = 0; i < dc->res_pool->stream_enc_count; i++) 99 - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); 99 + if (dc->res_pool->stream_enc[i]->vpg) 100 + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); 100 101 #if defined(CONFIG_DRM_AMD_DC_FP) 101 102 for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) 102 103 dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+25
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 989 989 static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) 990 990 { 991 991 struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; 992 + struct dc *dc = pipe_ctx->stream->ctx->dc; 992 993 struct dc_stream_state *stream = pipe_ctx->stream; 993 994 struct pipe_ctx *odm_pipe; 994 995 int opp_cnt = 1; 996 + struct dccg *dccg = dc->res_pool->dccg; 997 + /* It has been found that when DSCCLK is lower than 16MHz, we will get a DCN 998 + * register access hang. When DSCCLK is based on refclk, DSCCLK is always a 999 + * fixed value higher than 16MHz so the issue doesn't occur. When DSCCLK is 1000 + * generated by DTO, DSCCLK would be based on 1/3 dispclk. For small timings 1001 + * with DSC such as 480p60Hz, the dispclk could be low enough to trigger 1002 + * this problem. We are implementing a workaround here to keep using dscclk 1003 + * based on fixed value refclk when timing is smaller than 3x16MHz (i.e. 1004 + * 48MHz) pixel clock to avoid hitting this problem. 1005 + */ 1006 + bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) && 1007 + stream->timing.pix_clk_100hz > 480000; 995 1008 996 1009 ASSERT(dsc); 997 1010 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) ··· 1027 1014 1028 1015 dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); 1029 1016 dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); 1017 + if (should_use_dto_dscclk) 1018 + dccg->funcs->set_dto_dscclk(dccg, dsc->inst); 1030 1019 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 1031 1020 struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; 1032 1021 1033 1022 ASSERT(odm_dsc); 1034 1023 odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); 1035 1024 odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); 1025 + if (should_use_dto_dscclk) 1026 + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); 1036 1027 } 1037 1028 dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; 1038 1029 dsc_cfg.pic_width *= opp_cnt; ··· 1056 1039 OPTC_DSC_DISABLED, 0, 0); 1057 1040 1058 1041 /* disable DSC block */ 1042 + if (dccg->funcs->set_ref_dscclk) 1043 + dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst); 1059 1044 dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); 1060 1045 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 1061 1046 ASSERT(odm_pipe->stream_res.dsc); 1047 + if (dccg->funcs->set_ref_dscclk) 1048 + dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst); 1062 1049 odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); 1063 1050 } ··· 1145 1124 if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe && 1146 1125 current_pipe_ctx->next_odm_pipe->stream_res.dsc) { 1147 1126 struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; 1127 + struct dccg *dccg = dc->res_pool->dccg; 1128 + 1129 + if (dccg->funcs->set_ref_dscclk) 1130 + dccg->funcs->set_ref_dscclk(dccg, dsc->inst); 1148 1131 /* disconnect DSC block from stream */ 1149 1132 dsc->funcs->dsc_disconnect(dsc); 1150 1133 }
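The 480000 threshold is the comment's 48 MHz bound expressed in the driver's 100 Hz pixel-clock units (3 x 16 MHz = 48 MHz = 480000 x 100 Hz). A quick check of the arithmetic and the guard:

#include <stdbool.h>
#include <stdio.h>

/* DTO-generated DSCCLK is dispclk/3; below a 3 x 16 MHz pixel clock, stay on
 * the fixed refclk-based DSCCLK so DSCCLK never drops under 16 MHz. */
static bool use_dto_dscclk(bool have_dto_hook, unsigned int pix_clk_100hz)
{
    return have_dto_hook && pix_clk_100hz > 480000; /* > 48 MHz */
}

int main(void)
{
    printf("480p60 (25.2 MHz): %d\n", use_dto_dscclk(true, 252000));  /* 0 */
    printf("4k60   (594 MHz):  %d\n", use_dto_dscclk(true, 5940000)); /* 1 */
    return 0;
}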
+21 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 979 979 bool hpo_frl_stream_enc_acquired = false; 980 980 bool hpo_dp_stream_enc_acquired = false; 981 981 int i = 0, j = 0; 982 + int edp_num = 0; 983 + struct dc_link *edp_links[MAX_NUM_EDP] = { NULL }; 982 984 983 985 memset(update_state, 0, sizeof(struct pg_block_update)); 984 986 ··· 1021 1019 1022 1020 if (pipe_ctx->stream_res.opp) 1023 1021 update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; 1024 - 1025 - if (pipe_ctx->stream_res.tg) 1026 - update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false; 1027 1022 } 1023 + /*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/ 1024 + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 1025 + struct timing_generator *tg = dc->res_pool->timing_generators[i]; 1026 + if (tg && tg->funcs->is_tg_enabled(tg)) { 1027 + update_state->pg_pipe_res_update[PG_OPTC][i] = false; 1028 + break; 1029 + } 1030 + } 1031 + 1032 + dc_get_edp_links(dc, edp_links, &edp_num); 1033 + if (edp_num == 0 || 1034 + ((!edp_links[0] || !edp_links[0]->edp_sink_present) && 1035 + (!edp_links[1] || !edp_links[1]->edp_sink_present))) { 1036 + /*eDP does not exist on this config, keep Domain24 power on, for S0i3, this will be handled in dmubfw*/ 1037 + update_state->pg_pipe_res_update[PG_OPTC][0] = false; 1038 + } 1039 + 1028 1040 } 1029 1041 1030 1042 void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, ··· 1172 1156 pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on); 1173 1157 } 1174 1158 1159 + /*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/ 1175 1160 if (pg_cntl->funcs->plane_otg_pg_control) 1176 1161 pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on); 1162 + 1177 1163 1178 1164 void dcn35_root_clock_control(struct dc *dc,
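Condensed, the new gating rule is: leave an OPTC domain ungated if any timing generator is still running (Domain24 backs all of them, so the first live TG is enough), and on eDP-less configurations keep instance 0 powered unconditionally, leaving S0i3 to DMUB firmware. A reduced model of that decision:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TG 4

int main(void)
{
    bool tg_enabled[NUM_TG] = { false, true, false, false };
    bool edp_present = false;
    bool optc_may_gate[NUM_TG] = { true, true, true, true };

    /* One live TG blocks OTG power gating (Domain24 is shared). */
    for (int i = 0; i < NUM_TG; i++) {
        if (tg_enabled[i]) {
            optc_may_gate[i] = false;
            break;
        }
    }

    /* No eDP sink anywhere: keep instance 0 on for the firmware. */
    if (!edp_present)
        optc_may_gate[0] = false;

    for (int i = 0; i < NUM_TG; i++)
        printf("OPTC%d may gate: %d\n", i, optc_may_gate[i]);
    return 0;
}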
+2 -1
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
··· 64 64 bool (*set_pipe_ex)(struct abm *abm, 65 65 unsigned int otg_inst, 66 66 unsigned int option, 67 - unsigned int panel_inst); 67 + unsigned int panel_inst, 68 + unsigned int pwrseq_inst); 68 69 }; 69 70 70 71 #endif
+4
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
··· 201 201 struct dccg *dccg, 202 202 enum streamclk_source src, 203 203 uint32_t otg_inst); 204 + void (*set_dto_dscclk)( 205 + struct dccg *dccg, 206 + uint32_t dsc_inst); 207 + void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); 204 208 }; 205 209 206 210 #endif //__DAL_DCCG_H__
+4
drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
··· 188 188 bool (*is_enabled)( 189 189 struct dwbc *dwbc); 190 190 191 + void (*set_fc_enable)( 192 + struct dwbc *dwbc, 193 + enum dwb_frame_capture_enable enable); 194 + 191 195 void (*set_stereo)( 192 196 struct dwbc *dwbc, 193 197 struct dwb_stereo_params *stereo_params);
+2
drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
··· 56 56 struct panel_cntl_init_data { 57 57 struct dc_context *ctx; 58 58 uint32_t inst; 59 + uint32_t pwrseq_inst; 59 60 }; 60 61 61 62 struct panel_cntl { 62 63 const struct panel_cntl_funcs *funcs; 63 64 struct dc_context *ctx; 64 65 uint32_t inst; 66 + uint32_t pwrseq_inst; 65 67 /* registers setting needs to be saved and restored at InitBacklight */ 66 68 struct panel_cntl_backlight_registers stored_backlight_registers; 67 69 };
+4 -1
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 1250 1250 /* Store first available for MST second display 1251 1251 * in daisy chain use case 1252 1252 */ 1253 - j = i; 1253 + 1254 + if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL) 1255 + j = i; 1256 + 1254 1257 if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == 1255 1258 link->link_enc->preferred_engine) 1256 1259 return pool->stream_enc[i];
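The added guard changes which encoder index the MST daisy-chain fallback remembers: previously j could be left pointing at a virtual engine; now only real engines update it. Isolated into a sketch (the preferred-engine early return of the surrounding loop is omitted here):

#include <stdio.h>

enum engine_id { ENGINE_ID_DIGA, ENGINE_ID_DIGB, ENGINE_ID_VIRTUAL };

int main(void)
{
    enum engine_id enc[] = { ENGINE_ID_DIGA, ENGINE_ID_DIGB, ENGINE_ID_VIRTUAL };
    int j = -1;

    for (int i = 0; i < 3; i++)
        if (enc[i] != ENGINE_ID_VIRTUAL)
            j = i; /* fallback candidate; virtual engines no longer clobber it */

    printf("MST fallback encoder index: %d\n", j); /* 1; unguarded it would be 2 */
    return 0;
}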
+2
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 1682 1682 * We don't actually support prefetch mode 2, so require that we 1683 1683 * at least support prefetch mode 1. 1684 1684 */ 1685 + context->bw_ctx.dml.validate_max_state = fast_validate; 1685 1686 context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = 1686 1687 dm_allow_self_refresh; 1687 1688 ··· 1692 1691 memset(merge, 0, sizeof(merge)); 1693 1692 vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); 1694 1693 } 1694 + context->bw_ctx.dml.validate_max_state = false; 1695 1695 } 1696 1696 1697 1697 dml_log_mode_support_params(&context->bw_ctx.dml);
+2
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
··· 193 193 194 194 bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel); 195 195 196 + void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); 197 + 196 198 /* definitions for run time init of reg offsets */ 197 199 198 200 /* CLK SRC */
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 736 736 .i2c = true, 737 737 .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled 738 738 .dscl = true, 739 - .cm = false, 739 + .cm = true, 740 740 .mpc = true, 741 741 .optc = true, 742 742 .vpg = true,
+12 -2
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 3498 3498 * TODO: Remove. 3499 3499 */ 3500 3500 uint8_t ramping_boundary; 3501 + 3502 + /** 3503 + * PwrSeq HW Instance. 3504 + */ 3505 + uint8_t pwrseq_inst; 3506 + 3507 + /** 3508 + * Explicit padding to 4 byte boundary. 3509 + */ 3510 + uint8_t pad[3]; 3501 3511 }; 3502 3512 3503 3513 /** ··· 3888 3878 * struct dmub_cmd_panel_cntl_data - Panel control data. 3889 3879 */ 3890 3880 struct dmub_cmd_panel_cntl_data { 3891 - uint32_t inst; /**< panel instance */ 3881 + uint32_t pwrseq_inst; /**< pwrseq instance */ 3892 3882 uint32_t current_backlight; /* in/out */ 3893 3883 uint32_t bl_pwm_cntl; /* in/out */ 3894 3884 uint32_t bl_pwm_period_cntl; /* in/out */ ··· 3947 3937 uint8_t uc_pwr_action; /**< LVTMA_ACTION */ 3948 3938 uint8_t bypass_panel_control_wait; 3949 3939 uint8_t reserved_0[2]; /**< For future use */ 3950 - uint8_t panel_inst; /**< LVTMA control instance */ 3940 + uint8_t pwrseq_inst; /**< LVTMA control instance */ 3951 3941 uint8_t reserved_1[3]; /**< For future use */ 3952 3942 }; 3953 3943
+1 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
··· 64 64 65 65 66 66 /* Default scratch mem size. */ 67 - #define DMUB_SCRATCH_MEM_SIZE (256) 67 + #define DMUB_SCRATCH_MEM_SIZE (1024) 68 68 69 69 /* Number of windows in use. */ 70 70 #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
+1
drivers/gpu/drm/amd/include/amd_shared.h
··· 257 257 DC_DISABLE_MPO = 0x40, 258 258 DC_DISABLE_REPLAY = 0x50, 259 259 DC_ENABLE_DPIA_TRACE = 0x80, 260 + DC_ENABLE_DML2 = 0x100, 260 261 }; 261 262 262 263 enum amd_dpm_forced_level;
+102
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h
··· 1 + /* 2 + * Copyright (C) 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + */ 21 + #ifndef _smuio_10_0_2_OFFSET_HEADER 22 + 23 + // addressBlock: smuio_smuio_misc_SmuSmuioDec 24 + // base address: 0x5a000 25 + #define mmSMUIO_MCM_CONFIG 0x0023 26 + #define mmSMUIO_MCM_CONFIG_BASE_IDX 0 27 + #define mmIP_DISCOVERY_VERSION 0x0000 28 + #define mmIP_DISCOVERY_VERSION_BASE_IDX 1 29 + #define mmIO_SMUIO_PINSTRAP 0x01b1 30 + #define mmIO_SMUIO_PINSTRAP_BASE_IDX 1 31 + #define mmSCRATCH_REGISTER0 0x01b2 32 + #define mmSCRATCH_REGISTER0_BASE_IDX 1 33 + #define mmSCRATCH_REGISTER1 0x01b3 34 + #define mmSCRATCH_REGISTER1_BASE_IDX 1 35 + #define mmSCRATCH_REGISTER2 0x01b4 36 + #define mmSCRATCH_REGISTER2_BASE_IDX 1 37 + #define mmSCRATCH_REGISTER3 0x01b5 38 + #define mmSCRATCH_REGISTER3_BASE_IDX 1 39 + #define mmSCRATCH_REGISTER4 0x01b6 40 + #define mmSCRATCH_REGISTER4_BASE_IDX 1 41 + #define mmSCRATCH_REGISTER5 0x01b7 42 + #define mmSCRATCH_REGISTER5_BASE_IDX 1 43 + #define mmSCRATCH_REGISTER6 0x01b8 44 + #define mmSCRATCH_REGISTER6_BASE_IDX 1 45 + #define mmSCRATCH_REGISTER7 0x01b9 46 + #define mmSCRATCH_REGISTER7_BASE_IDX 1 47 + 48 + 49 + // addressBlock: smuio_smuio_reset_SmuSmuioDec 50 + // base address: 0x5a300 51 + #define mmSMUIO_MP_RESET_INTR 0x00c1 52 + #define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 53 + #define mmSMUIO_SOC_HALT 0x00c2 54 + #define mmSMUIO_SOC_HALT_BASE_IDX 0 55 + #define mmSMUIO_GFX_MISC_CNTL 0x00c8 56 + #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 57 + 58 + 59 + // addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec 60 + // base address: 0x5a000 61 + #define mmPWROK_REFCLK_GAP_CYCLES 0x0001 62 + #define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1 63 + #define mmGOLDEN_TSC_INCREMENT_UPPER 0x0004 64 + #define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1 65 + #define mmGOLDEN_TSC_INCREMENT_LOWER 0x0005 66 + #define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1 67 + #define mmGOLDEN_TSC_COUNT_UPPER 0x0025 68 + #define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1 69 + #define mmGOLDEN_TSC_COUNT_LOWER 0x0026 70 + #define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1 71 + #define mmGFX_GOLDEN_TSC_SHADOW_UPPER 0x0029 72 + #define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 73 + #define mmGFX_GOLDEN_TSC_SHADOW_LOWER 0x002a 74 + #define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 75 + #define mmSOC_GOLDEN_TSC_SHADOW_UPPER 0x002b 76 + #define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 77 + #define mmSOC_GOLDEN_TSC_SHADOW_LOWER 0x002c 78 + #define 
mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 79 + #define mmSOC_GAP_PWROK 0x002d 80 + #define mmSOC_GAP_PWROK_BASE_IDX 1 81 + 82 + // addressBlock: smuio_smuio_swtimer_SmuSmuioDec 83 + // base address: 0x5ac40 84 + #define mmPWR_VIRT_RESET_REQ 0x0110 85 + #define mmPWR_VIRT_RESET_REQ_BASE_IDX 1 86 + #define mmPWR_DISP_TIMER_CONTROL 0x0111 87 + #define mmPWR_DISP_TIMER_CONTROL_BASE_IDX 1 88 + #define mmPWR_DISP_TIMER2_CONTROL 0x0113 89 + #define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX 1 90 + #define mmPWR_DISP_TIMER_GLOBAL_CONTROL 0x0115 91 + #define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1 92 + #define mmPWR_IH_CONTROL 0x0116 93 + #define mmPWR_IH_CONTROL_BASE_IDX 1 94 + 95 + // addressBlock: smuio_smuio_svi0_SmuSmuioDec 96 + // base address: 0x6f000 97 + #define mmSMUSVI0_TEL_PLANE0 0x520e 98 + #define mmSMUSVI0_TEL_PLANE0_BASE_IDX 1 99 + #define mmSMUSVI0_PLANE0_CURRENTVID 0x5217 100 + #define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 1 101 + 102 + #endif
+184
drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h
··· 1 + /* 2 + * Copyright (C) 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + */ 21 + #ifndef _smuio_10_0_2_SH_MASK_HEADER 22 + 23 + // addressBlock: smuio_smuio_misc_SmuSmuioDec 24 + //SMUIO_MCM_CONFIG 25 + #define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 26 + #define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 27 + #define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5 28 + #define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6 29 + #define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 30 + #define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 31 + #define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L 32 + #define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL 33 + #define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L 34 + #define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L 35 + #define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L 36 + #define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L 37 + //IP_DISCOVERY_VERSION 38 + #define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 39 + #define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL 40 + //IO_SMUIO_PINSTRAP 41 + #define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0 42 + #define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3 43 + #define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L 44 + #define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L 45 + //SCRATCH_REGISTER0 46 + #define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 47 + #define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL 48 + //SCRATCH_REGISTER1 49 + #define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 50 + #define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL 51 + //SCRATCH_REGISTER2 52 + #define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 53 + #define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL 54 + //SCRATCH_REGISTER3 55 + #define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 56 + #define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL 57 + //SCRATCH_REGISTER4 58 + #define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 59 + #define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL 60 + //SCRATCH_REGISTER5 61 + #define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 62 + #define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL 63 + //SCRATCH_REGISTER6 64 + #define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 65 + #define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL 66 + //SCRATCH_REGISTER7 67 + #define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 68 + #define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL 69 + 70 + // addressBlock: 
smuio_smuio_reset_SmuSmuioDec 71 + //SMUIO_MP_RESET_INTR 72 + #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 73 + #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L 74 + //SMUIO_SOC_HALT 75 + #define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2 76 + #define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3 77 + #define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L 78 + #define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L 79 + //SMUIO_GFX_MISC_CNTL 80 + #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 81 + #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 82 + #define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3 83 + #define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4 84 + #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 85 + #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 86 + #define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L 87 + #define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L 88 + 89 + // addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec 90 + //PWROK_REFCLK_GAP_CYCLES 91 + #define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 92 + #define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 93 + #define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL 94 + #define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L 95 + //GOLDEN_TSC_INCREMENT_UPPER 96 + #define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 97 + #define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL 98 + //GOLDEN_TSC_INCREMENT_LOWER 99 + #define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 100 + #define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL 101 + //GOLDEN_TSC_COUNT_UPPER 102 + #define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 103 + #define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL 104 + //GOLDEN_TSC_COUNT_LOWER 105 + #define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 106 + #define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL 107 + //GFX_GOLDEN_TSC_SHADOW_UPPER 108 + #define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0 109 + #define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL 110 + //GFX_GOLDEN_TSC_SHADOW_LOWER 111 + #define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0 112 + #define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL 113 + //SOC_GOLDEN_TSC_SHADOW_UPPER 114 + #define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 115 + #define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL 116 + //SOC_GOLDEN_TSC_SHADOW_LOWER 117 + #define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 118 + #define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL 119 + //SOC_GAP_PWROK 120 + #define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 121 + #define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L 122 + 123 + // addressBlock: smuio_smuio_swtimer_SmuSmuioDec 124 + //PWR_VIRT_RESET_REQ 125 + #define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 126 + #define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f 127 + #define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL 128 + #define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L 129 + //PWR_DISP_TIMER_CONTROL 130 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 131 
+ #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 132 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a 133 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b 134 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c 135 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d 136 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e 137 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL 138 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L 139 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L 140 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L 141 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L 142 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L 143 + #define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L 144 + //PWR_DISP_TIMER2_CONTROL 145 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 146 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 147 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a 148 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b 149 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c 150 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d 151 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e 152 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL 153 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L 154 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L 155 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L 156 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L 157 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L 158 + #define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L 159 + //PWR_DISP_TIMER_GLOBAL_CONTROL 160 + #define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 161 + #define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa 162 + #define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL 163 + #define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L 164 + //PWR_IH_CONTROL 165 + #define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 166 + #define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 167 + #define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 168 + #define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f 169 + #define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL 170 + #define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L 171 + #define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L 172 + #define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L 173 + 174 + // addressBlock: smuio_smuio_svi0_SmuSmuioDec 175 + //SMUSVI0_TEL_PLANE0 176 + #define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0 177 + #define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10 178 + #define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL 179 + #define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L 180 + //SMUSVI0_PLANE0_CURRENTVID 181 + #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18 182 + #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L 183 + 184 + #endif
+1
drivers/gpu/drm/amd/include/mes_v11_api_def.h
··· 232 232 }; 233 233 uint32_t oversubscription_timer; 234 234 uint64_t doorbell_info; 235 + uint64_t event_intr_history_gpu_mc_ptr; 235 236 }; 236 237 237 238 uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+2 -2
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 2238 2238 } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) { 2239 2239 if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE) 2240 2240 *states = ATTR_STATE_UNSUPPORTED; 2241 - } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) { 2241 + } else if (DEVICE_ATTR_IS(pp_mclk_od)) { 2242 2242 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP) 2243 2243 *states = ATTR_STATE_UNSUPPORTED; 2244 - } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) { 2244 + } else if (DEVICE_ATTR_IS(pp_sclk_od)) { 2245 2245 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP) 2246 2246 *states = ATTR_STATE_UNSUPPORTED; 2247 2247 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
+6 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 687 687 if (radeon_crtc == NULL) 688 688 return; 689 689 690 + radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); 691 + if (!radeon_crtc->flip_queue) { 692 + kfree(radeon_crtc); 693 + return; 694 + } 695 + 690 696 drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); 691 697 692 698 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 693 699 radeon_crtc->crtc_id = index; 694 - radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); 695 700 rdev->mode_info.crtcs[index] = radeon_crtc; 696 701 697 702 if (rdev->family >= CHIP_BONAIRE) {
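The reordering matters because the old code published the CRTC (rdev->mode_info.crtcs[index]) before the workqueue allocation was ever checked; now allocation is attempted first, and a failure frees the not-yet-published object. The shape of the fix as a generic sketch:

#include <stdio.h>
#include <stdlib.h>

struct crtc { void *flip_queue; };

static void *alloc_queue(int fail)
{
    return fail ? NULL : malloc(1); /* stands in for alloc_workqueue() */
}

static struct crtc *crtc_init(int simulate_failure)
{
    struct crtc *c = calloc(1, sizeof(*c));

    if (!c)
        return NULL;
    /* Allocate before the object is published anywhere, and check it. */
    c->flip_queue = alloc_queue(simulate_failure);
    if (!c->flip_queue) {
        free(c); /* nothing references c yet, so freeing is safe */
        return NULL;
    }
    return c;
}

int main(void)
{
    struct crtc *c = crtc_init(0);

    printf("success path: %s\n", c ? "ok" : "fail");
    if (c) {
        free(c->flip_queue);
        free(c);
    }
    printf("failure path returns NULL: %d\n", crtc_init(1) == NULL);
    return 0;
}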
+3 -1
drivers/gpu/drm/radeon/sumo_dpm.c
··· 1493 1493 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1494 1494 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1495 1495 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 1496 - if (!rdev->pm.power_state[i].clock_info) 1496 + if (!rdev->pm.power_state[i].clock_info) { 1497 + kfree(rdev->pm.dpm.ps); 1497 1498 return -EINVAL; 1499 + } 1498 1500 ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); 1499 1501 if (ps == NULL) { 1500 1502 kfree(rdev->pm.dpm.ps);
+3 -1
drivers/gpu/drm/radeon/trinity_dpm.c
··· 1726 1726 non_clock_array_index = power_state->v2.nonClockInfoIndex; 1727 1727 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 1728 1728 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 1729 - if (!rdev->pm.power_state[i].clock_info) 1729 + if (!rdev->pm.power_state[i].clock_info) { 1730 + kfree(rdev->pm.dpm.ps); 1730 1731 return -EINVAL; 1732 + } 1731 1733 ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); 1732 1734 if (ps == NULL) { 1733 1735 kfree(rdev->pm.dpm.ps);
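Both radeon fixes (sumo_dpm.c and trinity_dpm.c) plug the same leak: the early -EINVAL return inside the state-parsing loop abandoned the rdev->pm.dpm.ps array allocated just before it, while every other error path in the loop already freed it. The pattern, reduced to a self-contained sketch:

#include <stdio.h>
#include <stdlib.h>

#define EINVAL 22

struct ps { int clock_info; };

static int parse_power_states(int nstates, int bad_index, struct ps **out)
{
    struct ps *ps = calloc(nstates, sizeof(*ps));

    if (!ps)
        return -12; /* -ENOMEM */
    for (int i = 0; i < nstates; i++) {
        if (i == bad_index) { /* e.g. missing clock_info */
            free(ps);         /* the fix: release before bailing out */
            return -EINVAL;
        }
        ps[i].clock_info = 1;
    }
    *out = ps;
    return 0;
}

int main(void)
{
    struct ps *ps = NULL;
    int r = parse_power_states(4, 2, &ps); /* fails at i == 2 without leaking */

    printf("r = %d\n", r);
    if (r == 0)
        free(ps);
    return 0;
}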