Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/mst: Stop ignoring error codes and deadlocking

It appears that amdgpu makes the mistake of completely ignoring the return
values from the DP MST helpers, and instead just returns a simple
true/false. In this case, it seems to have come back to bite us because as
a result of simply returning false from
compute_mst_dsc_configs_for_state(), amdgpu had no way of telling when a
deadlock happened from these helpers. This could definitely result in some
kernel splats.

V2:
* Address Wayne's comments (fix another bunch of spots where we weren't
passing down return codes)

Signed-off-by: Lyude Paul <lyude@redhat.com>
Fixes: 8c20a1ed9b4f ("drm/amd/display: MST DSC compute fair share")
Cc: Harry Wentland <harry.wentland@amd.com>
Cc: <stable@vger.kernel.org> # v5.6+
Reviewed-by: Wayne Lin <Wayne.Lin@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Lyude Paul; committed by Alex Deucher.
Commit: 7cce4cd6 (parent: c6023d73)

Total diff stat: +147 -118 across 3 files (per-file stats listed below).
+10 -8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 6539 6539 struct drm_connector_state *new_con_state; 6540 6540 struct amdgpu_dm_connector *aconnector; 6541 6541 struct dm_connector_state *dm_conn_state; 6542 - int i, j; 6542 + int i, j, ret; 6543 6543 int vcpi, pbn_div, pbn, slot_num = 0; 6544 6544 6545 6545 for_each_new_connector_in_state(state, connector, new_con_state, i) { ··· 6586 6586 dm_conn_state->pbn = pbn; 6587 6587 dm_conn_state->vcpi_slots = slot_num; 6588 6588 6589 - drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, 6590 - false); 6589 + ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, 6590 + dm_conn_state->pbn, false); 6591 + if (ret < 0) 6592 + return ret; 6593 + 6591 6594 continue; 6592 6595 } 6593 6596 ··· 9607 9604 9608 9605 #if defined(CONFIG_DRM_AMD_DC_DCN) 9609 9606 if (dc_resource_is_dsc_encoding_supported(dc)) { 9610 - if (!pre_validate_dsc(state, &dm_state, vars)) { 9611 - ret = -EINVAL; 9607 + ret = pre_validate_dsc(state, &dm_state, vars); 9608 + if (ret != 0) 9612 9609 goto fail; 9613 - } 9614 9610 } 9615 9611 #endif 9616 9612 ··· 9704 9702 } 9705 9703 9706 9704 #if defined(CONFIG_DRM_AMD_DC_DCN) 9707 - if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { 9705 + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 9706 + if (ret) { 9708 9707 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 9709 - ret = -EINVAL; 9710 9708 goto fail; 9711 9709 } 9712 9710
+131 -104
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 710 710 return dsc_config.bits_per_pixel; 711 711 } 712 712 713 - static bool increase_dsc_bpp(struct drm_atomic_state *state, 714 - struct drm_dp_mst_topology_state *mst_state, 715 - struct dc_link *dc_link, 716 - struct dsc_mst_fairness_params *params, 717 - struct dsc_mst_fairness_vars *vars, 718 - int count, 719 - int k) 713 + static int increase_dsc_bpp(struct drm_atomic_state *state, 714 + struct drm_dp_mst_topology_state *mst_state, 715 + struct dc_link *dc_link, 716 + struct dsc_mst_fairness_params *params, 717 + struct dsc_mst_fairness_vars *vars, 718 + int count, 719 + int k) 720 720 { 721 721 int i; 722 722 bool bpp_increased[MAX_PIPES]; ··· 726 726 int remaining_to_increase = 0; 727 727 int link_timeslots_used; 728 728 int fair_pbn_alloc; 729 + int ret = 0; 729 730 730 731 for (i = 0; i < count; i++) { 731 732 if (vars[i + k].dsc_enabled) { ··· 765 764 766 765 if (initial_slack[next_index] > fair_pbn_alloc) { 767 766 vars[next_index].pbn += fair_pbn_alloc; 768 - if (drm_dp_atomic_find_time_slots(state, 769 - params[next_index].port->mgr, 770 - params[next_index].port, 771 - vars[next_index].pbn) < 0) 772 - return false; 773 - if (!drm_dp_mst_atomic_check(state)) { 767 + ret = drm_dp_atomic_find_time_slots(state, 768 + params[next_index].port->mgr, 769 + params[next_index].port, 770 + vars[next_index].pbn); 771 + if (ret < 0) 772 + return ret; 773 + 774 + ret = drm_dp_mst_atomic_check(state); 775 + if (ret == 0) { 774 776 vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); 775 777 } else { 776 778 vars[next_index].pbn -= fair_pbn_alloc; 777 - if (drm_dp_atomic_find_time_slots(state, 778 - params[next_index].port->mgr, 779 - params[next_index].port, 780 - vars[next_index].pbn) < 0) 781 - return false; 779 + ret = drm_dp_atomic_find_time_slots(state, 780 + params[next_index].port->mgr, 781 + params[next_index].port, 782 + vars[next_index].pbn); 783 + if (ret < 0) 784 + return ret; 782 785 } 783 786 } else { 784 787 
vars[next_index].pbn += initial_slack[next_index]; 785 - if (drm_dp_atomic_find_time_slots(state, 786 - params[next_index].port->mgr, 787 - params[next_index].port, 788 - vars[next_index].pbn) < 0) 789 - return false; 790 - if (!drm_dp_mst_atomic_check(state)) { 788 + ret = drm_dp_atomic_find_time_slots(state, 789 + params[next_index].port->mgr, 790 + params[next_index].port, 791 + vars[next_index].pbn); 792 + if (ret < 0) 793 + return ret; 794 + 795 + ret = drm_dp_mst_atomic_check(state); 796 + if (ret == 0) { 791 797 vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; 792 798 } else { 793 799 vars[next_index].pbn -= initial_slack[next_index]; 794 - if (drm_dp_atomic_find_time_slots(state, 795 - params[next_index].port->mgr, 796 - params[next_index].port, 797 - vars[next_index].pbn) < 0) 798 - return false; 800 + ret = drm_dp_atomic_find_time_slots(state, 801 + params[next_index].port->mgr, 802 + params[next_index].port, 803 + vars[next_index].pbn); 804 + if (ret < 0) 805 + return ret; 799 806 } 800 807 } 801 808 802 809 bpp_increased[next_index] = true; 803 810 remaining_to_increase--; 804 811 } 805 - return true; 812 + return 0; 806 813 } 807 814 808 - static bool try_disable_dsc(struct drm_atomic_state *state, 809 - struct dc_link *dc_link, 810 - struct dsc_mst_fairness_params *params, 811 - struct dsc_mst_fairness_vars *vars, 812 - int count, 813 - int k) 815 + static int try_disable_dsc(struct drm_atomic_state *state, 816 + struct dc_link *dc_link, 817 + struct dsc_mst_fairness_params *params, 818 + struct dsc_mst_fairness_vars *vars, 819 + int count, 820 + int k) 814 821 { 815 822 int i; 816 823 bool tried[MAX_PIPES]; ··· 826 817 int max_kbps_increase; 827 818 int next_index; 828 819 int remaining_to_try = 0; 820 + int ret; 829 821 830 822 for (i = 0; i < count; i++) { 831 823 if (vars[i + k].dsc_enabled ··· 857 847 break; 858 848 859 849 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); 860 - if 
(drm_dp_atomic_find_time_slots(state, 861 - params[next_index].port->mgr, 862 - params[next_index].port, 863 - vars[next_index].pbn) < 0) 864 - return false; 850 + ret = drm_dp_atomic_find_time_slots(state, 851 + params[next_index].port->mgr, 852 + params[next_index].port, 853 + vars[next_index].pbn); 854 + if (ret < 0) 855 + return ret; 865 856 866 - if (!drm_dp_mst_atomic_check(state)) { 857 + ret = drm_dp_mst_atomic_check(state); 858 + if (ret == 0) { 867 859 vars[next_index].dsc_enabled = false; 868 860 vars[next_index].bpp_x16 = 0; 869 861 } else { 870 862 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); 871 - if (drm_dp_atomic_find_time_slots(state, 872 - params[next_index].port->mgr, 873 - params[next_index].port, 874 - vars[next_index].pbn) < 0) 875 - return false; 863 + ret = drm_dp_atomic_find_time_slots(state, 864 + params[next_index].port->mgr, 865 + params[next_index].port, 866 + vars[next_index].pbn); 867 + if (ret < 0) 868 + return ret; 876 869 } 877 870 878 871 tried[next_index] = true; 879 872 remaining_to_try--; 880 873 } 881 - return true; 874 + return 0; 882 875 } 883 876 884 - static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, 885 - struct dc_state *dc_state, 886 - struct dc_link *dc_link, 887 - struct dsc_mst_fairness_vars *vars, 888 - struct drm_dp_mst_topology_mgr *mgr, 889 - int *link_vars_start_index) 877 + static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, 878 + struct dc_state *dc_state, 879 + struct dc_link *dc_link, 880 + struct dsc_mst_fairness_vars *vars, 881 + struct drm_dp_mst_topology_mgr *mgr, 882 + int *link_vars_start_index) 890 883 { 891 884 struct dc_stream_state *stream; 892 885 struct dsc_mst_fairness_params params[MAX_PIPES]; 893 886 struct amdgpu_dm_connector *aconnector; 894 887 struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); 895 888 int count = 0; 896 - int i, k; 889 + int i, k, ret; 897 890 bool 
debugfs_overwrite = false; 898 891 899 892 memset(params, 0, sizeof(params)); 900 893 901 894 if (IS_ERR(mst_state)) 902 - return false; 895 + return PTR_ERR(mst_state); 903 896 904 897 mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); 905 898 #if defined(CONFIG_DRM_AMD_DC_DCN) ··· 953 940 954 941 if (count == 0) { 955 942 ASSERT(0); 956 - return true; 943 + return 0; 957 944 } 958 945 959 946 /* k is start index of vars for current phy link used by mst hub */ ··· 967 954 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 968 955 vars[i + k].dsc_enabled = false; 969 956 vars[i + k].bpp_x16 = 0; 970 - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, 971 - vars[i + k].pbn) < 0) 972 - return false; 957 + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, 958 + vars[i + k].pbn); 959 + if (ret < 0) 960 + return ret; 973 961 } 974 - if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { 962 + ret = drm_dp_mst_atomic_check(state); 963 + if (ret == 0 && !debugfs_overwrite) { 975 964 set_dsc_configs_from_fairness_vars(params, vars, count, k); 976 - return true; 965 + return 0; 966 + } else if (ret != -ENOSPC) { 967 + return ret; 977 968 } 978 969 979 970 /* Try max compression */ ··· 986 969 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); 987 970 vars[i + k].dsc_enabled = true; 988 971 vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; 989 - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 990 - params[i].port, vars[i + k].pbn) < 0) 991 - return false; 972 + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 973 + params[i].port, vars[i + k].pbn); 974 + if (ret < 0) 975 + return ret; 992 976 } else { 993 977 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 994 978 vars[i + k].dsc_enabled = false; 995 979 vars[i + k].bpp_x16 = 0; 996 - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 997 - params[i].port, 
vars[i + k].pbn) < 0) 998 - return false; 980 + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 981 + params[i].port, vars[i + k].pbn); 982 + if (ret < 0) 983 + return ret; 999 984 } 1000 985 } 1001 - if (drm_dp_mst_atomic_check(state)) 1002 - return false; 986 + ret = drm_dp_mst_atomic_check(state); 987 + if (ret != 0) 988 + return ret; 1003 989 1004 990 /* Optimize degree of compression */ 1005 - if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k)) 1006 - return false; 991 + ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); 992 + if (ret < 0) 993 + return ret; 1007 994 1008 - if (!try_disable_dsc(state, dc_link, params, vars, count, k)) 1009 - return false; 995 + ret = try_disable_dsc(state, dc_link, params, vars, count, k); 996 + if (ret < 0) 997 + return ret; 1010 998 1011 999 set_dsc_configs_from_fairness_vars(params, vars, count, k); 1012 1000 1013 - return true; 1001 + return 0; 1014 1002 } 1015 1003 1016 1004 static bool is_dsc_need_re_compute( ··· 1116 1094 return is_dsc_need_re_compute; 1117 1095 } 1118 1096 1119 - bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1120 - struct dc_state *dc_state, 1121 - struct dsc_mst_fairness_vars *vars) 1097 + int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1098 + struct dc_state *dc_state, 1099 + struct dsc_mst_fairness_vars *vars) 1122 1100 { 1123 1101 int i, j; 1124 1102 struct dc_stream_state *stream; 1125 1103 bool computed_streams[MAX_PIPES]; 1126 1104 struct amdgpu_dm_connector *aconnector; 1127 1105 int link_vars_start_index = 0; 1106 + int ret = 0; 1128 1107 1129 1108 for (i = 0; i < dc_state->stream_count; i++) 1130 1109 computed_streams[i] = false; ··· 1148 1125 continue; 1149 1126 1150 1127 if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) 1151 - return false; 1128 + return -EINVAL; 1152 1129 1153 1130 if (!is_dsc_need_re_compute(state, dc_state, stream->link)) 1154 1131 
continue; 1155 1132 1156 1133 mutex_lock(&aconnector->mst_mgr.lock); 1157 - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, 1158 - &aconnector->mst_mgr, 1159 - &link_vars_start_index)) { 1134 + 1135 + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, 1136 + &aconnector->mst_mgr, 1137 + &link_vars_start_index); 1138 + if (ret != 0) { 1160 1139 mutex_unlock(&aconnector->mst_mgr.lock); 1161 - return false; 1140 + return ret; 1162 1141 } 1163 1142 mutex_unlock(&aconnector->mst_mgr.lock); 1164 1143 ··· 1175 1150 1176 1151 if (stream->timing.flags.DSC == 1) 1177 1152 if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) 1178 - return false; 1153 + return -EINVAL; 1179 1154 } 1180 1155 1181 - return true; 1156 + return ret; 1182 1157 } 1183 1158 1184 - static bool 1185 - pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1186 - struct dc_state *dc_state, 1187 - struct dsc_mst_fairness_vars *vars) 1159 + static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1160 + struct dc_state *dc_state, 1161 + struct dsc_mst_fairness_vars *vars) 1188 1162 { 1189 1163 int i, j; 1190 1164 struct dc_stream_state *stream; 1191 1165 bool computed_streams[MAX_PIPES]; 1192 1166 struct amdgpu_dm_connector *aconnector; 1193 1167 int link_vars_start_index = 0; 1168 + int ret; 1194 1169 1195 1170 for (i = 0; i < dc_state->stream_count; i++) 1196 1171 computed_streams[i] = false; ··· 1216 1191 continue; 1217 1192 1218 1193 mutex_lock(&aconnector->mst_mgr.lock); 1219 - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, 1220 - &aconnector->mst_mgr, 1221 - &link_vars_start_index)) { 1194 + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, 1195 + &aconnector->mst_mgr, 1196 + &link_vars_start_index); 1197 + if (ret != 0) { 1222 1198 mutex_unlock(&aconnector->mst_mgr.lock); 1223 - return false; 1199 + return ret; 1224 1200 } 1225 
1201 mutex_unlock(&aconnector->mst_mgr.lock); 1226 1202 ··· 1231 1205 } 1232 1206 } 1233 1207 1234 - return true; 1208 + return ret; 1235 1209 } 1236 1210 1237 1211 static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, ··· 1286 1260 return ret; 1287 1261 } 1288 1262 1289 - bool pre_validate_dsc(struct drm_atomic_state *state, 1290 - struct dm_atomic_state **dm_state_ptr, 1291 - struct dsc_mst_fairness_vars *vars) 1263 + int pre_validate_dsc(struct drm_atomic_state *state, 1264 + struct dm_atomic_state **dm_state_ptr, 1265 + struct dsc_mst_fairness_vars *vars) 1292 1266 { 1293 1267 int i; 1294 1268 struct dm_atomic_state *dm_state; ··· 1297 1271 1298 1272 if (!is_dsc_precompute_needed(state)) { 1299 1273 DRM_INFO_ONCE("DSC precompute is not needed.\n"); 1300 - return true; 1274 + return 0; 1301 1275 } 1302 - if (dm_atomic_get_state(state, dm_state_ptr)) { 1276 + ret = dm_atomic_get_state(state, dm_state_ptr); 1277 + if (ret != 0) { 1303 1278 DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); 1304 - return false; 1279 + return ret; 1305 1280 } 1306 1281 dm_state = *dm_state_ptr; 1307 1282 ··· 1314 1287 1315 1288 local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); 1316 1289 if (!local_dc_state) 1317 - return false; 1290 + return -ENOMEM; 1318 1291 1319 1292 for (i = 0; i < local_dc_state->stream_count; i++) { 1320 1293 struct dc_stream_state *stream = dm_state->context->streams[i]; ··· 1350 1323 if (ret != 0) 1351 1324 goto clean_exit; 1352 1325 1353 - if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) { 1326 + ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); 1327 + if (ret != 0) { 1354 1328 DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); 1355 - ret = -EINVAL; 1356 1329 goto clean_exit; 1357 1330 } 1358 1331 ··· 1383 1356 1384 1357 kfree(local_dc_state); 1385 1358 1386 - return (ret == 0); 1359 + return ret; 1387 1360 } 1388 1361 1389 1362 static 
unsigned int kbps_from_pbn(unsigned int pbn)
+6 -6
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
··· 53 53 struct amdgpu_dm_connector *aconnector; 54 54 }; 55 55 56 - bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 57 - struct dc_state *dc_state, 58 - struct dsc_mst_fairness_vars *vars); 56 + int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 57 + struct dc_state *dc_state, 58 + struct dsc_mst_fairness_vars *vars); 59 59 60 60 bool needs_dsc_aux_workaround(struct dc_link *link); 61 61 62 - bool pre_validate_dsc(struct drm_atomic_state *state, 63 - struct dm_atomic_state **dm_state_ptr, 64 - struct dsc_mst_fairness_vars *vars); 62 + int pre_validate_dsc(struct drm_atomic_state *state, 63 + struct dm_atomic_state **dm_state_ptr, 64 + struct dsc_mst_fairness_vars *vars); 65 65 66 66 enum dc_status dm_dp_mst_is_port_support_mode( 67 67 struct amdgpu_dm_connector *aconnector,