Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2020-08-28' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"As expected a bit of an rc3 uptick, amdgpu and msm are the main ones,
one msm patch was from the merge window, but had dependencies and we
dropped it until the other tree had landed. Otherwise it's a couple of
fixes for core, and etnaviv, and single i915, exynos, omap fixes.

I'm still tracking the Sandybridge gpu relocations issue, if we don't
see much movement I might just queue up the reverts. I'll talk to
Daniel next week once he's back from holidays.

core:
- Take modeset bkl for legacy drivers

dp_mst:
- Allow null crtc in dp_mst

i915:
- Fix command parser desc matching with masks

amdgpu:
- Misc display fixes
- Backlight fixes
- MPO fix for DCN1
- Fixes for Sienna Cichlid
- Fixes for Navy Flounder
- Vega SW CTF fixes
- SMU fix for Raven
- Fix a possible overflow in INFO ioctl
- Gfx10 clockgating fix

msm:
- opp/bw scaling patch followup
- frequency restoring fix
- vblank in atomic commit fix
- dpu modesetting fixes
- fencing fix

etnaviv:
- scheduler interaction fix
- gpu init regression fix

exynos:
- Just drop __iommu annotation to fix sparse warning

omap:
- locking state fix"

* tag 'drm-fixes-2020-08-28' of git://anongit.freedesktop.org/drm/drm: (41 commits)
drm/amd/display: Fix memleak in amdgpu_dm_mode_config_init
drm/amdgpu: disable runtime pm for navy_flounder
drm/amd/display: Retry AUX write when fail occurs
drm/amdgpu: Fix buffer overflow in INFO ioctl
drm/amd/powerplay: Fix hardmins not being sent to SMU for RV
drm/amdgpu: use MODE1 reset for navy_flounder by default
drm/amd/pm: correct the thermal alert temperature limit settings
drm/amdgpu: add asd fw check before loading asd
drm/amd/display: Keep current gain when ABM disable immediately
drm/amd/display: Fix passive dongle mistaken as active dongle in EDID emulation
drm/amd/display: Revert HDCP disable sequence change
drm/amd/display: Send DISPLAY_OFF after power down on boot
drm/amdgpu/gfx10: refine mgcg setting
drm/amd/pm: correct Vega20 swctf limit setting
drm/amd/pm: correct Vega12 swctf limit setting
drm/amd/pm: correct Vega10 swctf limit setting
drm/amd/pm: set VCN pg per instances
drm/amd/pm: enable run_btc callback for sienna_cichlid
drivers: gpu: amd: Initialize amdgpu_dm_backlight_caps object to 0 in amdgpu_dm_update_backlight_caps
drm/amd/display: Reject overlay plane configurations in multi-display scenarios
...

+309 -168
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 179 179 case CHIP_VEGA20: 180 180 case CHIP_ARCTURUS: 181 181 case CHIP_SIENNA_CICHLID: 182 + case CHIP_NAVY_FLOUNDER: 182 183 /* enable runpm if runpm=1 */ 183 184 if (amdgpu_runtime_pm > 0) 184 185 adev->runpm = true; ··· 679 678 * in the bitfields */ 680 679 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) 681 680 se_num = 0xffffffff; 681 + else if (se_num >= AMDGPU_GFX_MAX_SE) 682 + return -EINVAL; 682 683 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 683 684 sh_num = 0xffffffff; 685 + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) 686 + return -EINVAL; 684 687 685 688 if (info->read_mmr_reg.count > 128) 686 689 return -EINVAL;
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 522 522 * add workaround to bypass it for sriov now. 523 523 * TODO: add version check to make it common 524 524 */ 525 - if (amdgpu_sriov_vf(psp->adev) || 526 - (psp->adev->asic_type == CHIP_NAVY_FLOUNDER)) 525 + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw) 527 526 return 0; 528 527 529 528 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+2 -4
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 7263 7263 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 7264 7264 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 7265 7265 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 7266 - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 7267 - 7268 - /* only for Vega10 & Raven1 */ 7269 - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; 7266 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | 7267 + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); 7270 7268 7271 7269 if (def != data) 7272 7270 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+1
drivers/gpu/drm/amd/amdgpu/nv.c
··· 364 364 365 365 switch (adev->asic_type) { 366 366 case CHIP_SIENNA_CICHLID: 367 + case CHIP_NAVY_FLOUNDER: 367 368 return AMD_RESET_METHOD_MODE1; 368 369 default: 369 370 if (smu_baco_is_support(smu))
+50 -43
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2834 2834 &dm_atomic_state_funcs); 2835 2835 2836 2836 r = amdgpu_display_modeset_create_props(adev); 2837 - if (r) 2837 + if (r) { 2838 + dc_release_state(state->context); 2839 + kfree(state); 2838 2840 return r; 2841 + } 2839 2842 2840 2843 r = amdgpu_dm_audio_init(adev); 2841 - if (r) 2844 + if (r) { 2845 + dc_release_state(state->context); 2846 + kfree(state); 2842 2847 return r; 2848 + } 2843 2849 2844 2850 return 0; 2845 2851 } ··· 2861 2855 { 2862 2856 #if defined(CONFIG_ACPI) 2863 2857 struct amdgpu_dm_backlight_caps caps; 2858 + 2859 + memset(&caps, 0, sizeof(caps)); 2864 2860 2865 2861 if (dm->backlight_caps.caps_valid) 2866 2862 return; ··· 2902 2894 return rc ? 0 : 1; 2903 2895 } 2904 2896 2905 - static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps, 2906 - const uint32_t user_brightness) 2897 + static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 2898 + unsigned *min, unsigned *max) 2907 2899 { 2908 - u32 min, max, conversion_pace; 2909 - u32 brightness = user_brightness; 2910 - 2911 2900 if (!caps) 2912 - goto out; 2901 + return 0; 2913 2902 2914 - if (!caps->aux_support) { 2915 - max = caps->max_input_signal; 2916 - min = caps->min_input_signal; 2917 - /* 2918 - * The brightness input is in the range 0-255 2919 - * It needs to be rescaled to be between the 2920 - * requested min and max input signal 2921 - * It also needs to be scaled up by 0x101 to 2922 - * match the DC interface which has a range of 2923 - * 0 to 0xffff 2924 - */ 2925 - conversion_pace = 0x101; 2926 - brightness = 2927 - user_brightness 2928 - * conversion_pace 2929 - * (max - min) 2930 - / AMDGPU_MAX_BL_LEVEL 2931 - + min * conversion_pace; 2903 + if (caps->aux_support) { 2904 + // Firmware limits are in nits, DC API wants millinits. 
2905 + *max = 1000 * caps->aux_max_input_signal; 2906 + *min = 1000 * caps->aux_min_input_signal; 2932 2907 } else { 2933 - /* TODO 2934 - * We are doing a linear interpolation here, which is OK but 2935 - * does not provide the optimal result. We probably want 2936 - * something close to the Perceptual Quantizer (PQ) curve. 2937 - */ 2938 - max = caps->aux_max_input_signal; 2939 - min = caps->aux_min_input_signal; 2940 - 2941 - brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min 2942 - + user_brightness * max; 2943 - // Multiple the value by 1000 since we use millinits 2944 - brightness *= 1000; 2945 - brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL); 2908 + // Firmware limits are 8-bit, PWM control is 16-bit. 2909 + *max = 0x101 * caps->max_input_signal; 2910 + *min = 0x101 * caps->min_input_signal; 2946 2911 } 2912 + return 1; 2913 + } 2947 2914 2948 - out: 2949 - return brightness; 2915 + static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 2916 + uint32_t brightness) 2917 + { 2918 + unsigned min, max; 2919 + 2920 + if (!get_brightness_range(caps, &min, &max)) 2921 + return brightness; 2922 + 2923 + // Rescale 0..255 to min..max 2924 + return min + DIV_ROUND_CLOSEST((max - min) * brightness, 2925 + AMDGPU_MAX_BL_LEVEL); 2926 + } 2927 + 2928 + static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 2929 + uint32_t brightness) 2930 + { 2931 + unsigned min, max; 2932 + 2933 + if (!get_brightness_range(caps, &min, &max)) 2934 + return brightness; 2935 + 2936 + if (brightness < min) 2937 + return 0; 2938 + // Rescale min..max to 0..255 2939 + return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 2940 + max - min); 2950 2941 } 2951 2942 2952 2943 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) ··· 2961 2954 2962 2955 link = (struct dc_link *)dm->backlight_link; 2963 2956 2964 - brightness = convert_brightness(&caps, bd->props.brightness); 2957 + 
brightness = convert_brightness_from_user(&caps, bd->props.brightness); 2965 2958 // Change brightness based on AUX property 2966 2959 if (caps.aux_support) 2967 2960 return set_backlight_via_aux(link, brightness); ··· 2978 2971 2979 2972 if (ret == DC_ERROR_UNEXPECTED) 2980 2973 return bd->props.brightness; 2981 - return ret; 2974 + return convert_brightness_to_user(&dm->backlight_caps, ret); 2982 2975 } 2983 2976 2984 2977 static const struct backlight_ops amdgpu_dm_backlight_ops = {
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 67 67 result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, 68 68 &operation_result); 69 69 70 - if (payload.write) 70 + if (payload.write && result >= 0) 71 71 result = msg->size; 72 72 73 73 if (result < 0)
+10
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 94 94 return display_count; 95 95 } 96 96 97 + void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) 98 + { 99 + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 100 + 101 + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); 102 + /* update power state */ 103 + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; 104 + } 105 + 97 106 void rn_update_clocks(struct clk_mgr *clk_mgr_base, 98 107 struct dc_state *context, 99 108 bool safe_to_lower) ··· 525 516 .init_clocks = rn_init_clocks, 526 517 .enable_pme_wa = rn_enable_pme_wa, 527 518 .are_clock_states_equal = rn_are_clock_states_equal, 519 + .set_low_power_state = rn_set_low_power_state, 528 520 .notify_wm_ranges = rn_notify_wm_ranges, 529 521 .notify_link_rate_change = rn_notify_link_rate_change, 530 522 };
+2 -1
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 763 763 sink_caps->signal = dp_passive_dongle_detection(link->ddc, 764 764 sink_caps, 765 765 audio_support); 766 + link->dpcd_caps.dongle_type = sink_caps->dongle_type; 766 767 } 767 768 768 769 return true; ··· 3287 3286 core_link_set_avmute(pipe_ctx, true); 3288 3287 } 3289 3288 3290 - dc->hwss.blank_stream(pipe_ctx); 3291 3289 #if defined(CONFIG_DRM_AMD_DC_HDCP) 3292 3290 update_psp_stream_config(pipe_ctx, true); 3293 3291 #endif 3292 + dc->hwss.blank_stream(pipe_ctx); 3294 3293 3295 3294 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 3296 3295 deallocate_mst_payload(pipe_ctx);
+1 -1
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 233 233 union stream_update_flags update_flags; 234 234 }; 235 235 236 - #define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF 236 + #define ABM_LEVEL_IMMEDIATE_DISABLE 255 237 237 238 238 struct dc_stream_update { 239 239 struct dc_stream_state *stream;
+30 -21
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 1450 1450 void dcn10_power_down_on_boot(struct dc *dc) 1451 1451 { 1452 1452 int i = 0; 1453 + struct dc_link *edp_link; 1453 1454 1454 - if (dc->config.power_down_display_on_boot) { 1455 - struct dc_link *edp_link = get_edp_link(dc); 1455 + if (!dc->config.power_down_display_on_boot) 1456 + return; 1456 1457 1457 - if (edp_link && 1458 - edp_link->link_enc->funcs->is_dig_enabled && 1459 - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && 1460 - dc->hwseq->funcs.edp_backlight_control && 1461 - dc->hwss.power_down && 1462 - dc->hwss.edp_power_control) { 1463 - dc->hwseq->funcs.edp_backlight_control(edp_link, false); 1464 - dc->hwss.power_down(dc); 1465 - dc->hwss.edp_power_control(edp_link, false); 1466 - } else { 1467 - for (i = 0; i < dc->link_count; i++) { 1468 - struct dc_link *link = dc->links[i]; 1458 + edp_link = get_edp_link(dc); 1459 + if (edp_link && 1460 + edp_link->link_enc->funcs->is_dig_enabled && 1461 + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && 1462 + dc->hwseq->funcs.edp_backlight_control && 1463 + dc->hwss.power_down && 1464 + dc->hwss.edp_power_control) { 1465 + dc->hwseq->funcs.edp_backlight_control(edp_link, false); 1466 + dc->hwss.power_down(dc); 1467 + dc->hwss.edp_power_control(edp_link, false); 1468 + } else { 1469 + for (i = 0; i < dc->link_count; i++) { 1470 + struct dc_link *link = dc->links[i]; 1469 1471 1470 - if (link->link_enc->funcs->is_dig_enabled && 1471 - link->link_enc->funcs->is_dig_enabled(link->link_enc) && 1472 - dc->hwss.power_down) { 1473 - dc->hwss.power_down(dc); 1474 - break; 1475 - } 1476 - 1472 + if (link->link_enc->funcs->is_dig_enabled && 1473 + link->link_enc->funcs->is_dig_enabled(link->link_enc) && 1474 + dc->hwss.power_down) { 1475 + dc->hwss.power_down(dc); 1476 + break; 1477 1477 } 1478 + 1478 1479 } 1479 1480 } 1481 + 1482 + /* 1483 + * Call update_clocks with empty context 1484 + * to send DISPLAY_OFF 1485 + * Otherwise DISPLAY_OFF may not be asserted 1486 + */ 1487 + 
if (dc->clk_mgr->funcs->set_low_power_state) 1488 + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); 1480 1489 } 1481 1490 1482 1491 void dcn10_reset_hw_ctx_wrap(
+8
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
··· 1213 1213 bool video_large = false; 1214 1214 bool desktop_large = false; 1215 1215 bool dcc_disabled = false; 1216 + bool mpo_enabled = false; 1216 1217 1217 1218 for (i = 0; i < context->stream_count; i++) { 1218 1219 if (context->stream_status[i].plane_count == 0) ··· 1221 1220 1222 1221 if (context->stream_status[i].plane_count > 2) 1223 1222 return DC_FAIL_UNSUPPORTED_1; 1223 + 1224 + if (context->stream_status[i].plane_count > 1) 1225 + mpo_enabled = true; 1224 1226 1225 1227 for (j = 0; j < context->stream_status[i].plane_count; j++) { 1226 1228 struct dc_plane_state *plane = ··· 1247 1243 } 1248 1244 } 1249 1245 } 1246 + 1247 + /* Disable MPO in multi-display configurations. */ 1248 + if (context->stream_count > 1 && mpo_enabled) 1249 + return DC_FAIL_UNSUPPORTED_1; 1250 1250 1251 1251 /* 1252 1252 * Workaround: On DCN10 there is UMC issue that causes underflow when
+2
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
··· 230 230 231 231 int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); 232 232 233 + void (*set_low_power_state)(struct clk_mgr *clk_mgr); 234 + 233 235 void (*init_clocks)(struct clk_mgr *clk_mgr); 234 236 235 237 void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
+3 -6
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
··· 204 204 { 205 205 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 206 206 207 - if (smu10_data->need_min_deep_sleep_dcefclk && 208 - smu10_data->deep_sleep_dcefclk != clock) { 207 + if (clock && smu10_data->deep_sleep_dcefclk != clock) { 209 208 smu10_data->deep_sleep_dcefclk = clock; 210 209 smum_send_msg_to_smc_with_parameter(hwmgr, 211 210 PPSMC_MSG_SetMinDeepSleepDcefclk, ··· 218 219 { 219 220 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 220 221 221 - if (smu10_data->dcf_actual_hard_min_freq && 222 - smu10_data->dcf_actual_hard_min_freq != clock) { 222 + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) { 223 223 smu10_data->dcf_actual_hard_min_freq = clock; 224 224 smum_send_msg_to_smc_with_parameter(hwmgr, 225 225 PPSMC_MSG_SetHardMinDcefclkByFreq, ··· 232 234 { 233 235 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 234 236 235 - if (smu10_data->f_actual_hard_min_freq && 236 - smu10_data->f_actual_hard_min_freq != clock) { 237 + if (clock && smu10_data->f_actual_hard_min_freq != clock) { 237 238 smu10_data->f_actual_hard_min_freq = clock; 238 239 smum_send_msg_to_smc_with_parameter(hwmgr, 239 240 PPSMC_MSG_SetHardMinFclkByFreq,
+12 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
··· 363 363 static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 364 364 struct PP_TemperatureRange *range) 365 365 { 366 + struct phm_ppt_v2_information *pp_table_info = 367 + (struct phm_ppt_v2_information *)(hwmgr->pptable); 368 + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; 366 369 struct amdgpu_device *adev = hwmgr->adev; 367 - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * 368 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 369 - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * 370 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 370 + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP; 371 + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; 371 372 uint32_t val; 372 373 373 - if (low < range->min) 374 - low = range->min; 375 - if (high > range->max) 376 - high = range->max; 374 + /* compare them in unit celsius degree */ 375 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) 376 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 377 + if (high > tdp_table->usSoftwareShutdownTemp) 378 + high = tdp_table->usSoftwareShutdownTemp; 377 379 378 380 if (low > high) 379 381 return -EINVAL; ··· 384 382 385 383 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 386 384 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 387 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 388 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 385 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); 386 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); 389 387 val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & 390 388 (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & 391 389 (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
+11 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
··· 170 170 static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 171 171 struct PP_TemperatureRange *range) 172 172 { 173 + struct phm_ppt_v3_information *pptable_information = 174 + (struct phm_ppt_v3_information *)hwmgr->pptable; 173 175 struct amdgpu_device *adev = hwmgr->adev; 174 - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * 175 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 176 - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * 177 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 176 + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP; 177 + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP; 178 178 uint32_t val; 179 179 180 - if (low < range->min) 181 - low = range->min; 182 - if (high > range->max) 183 - high = range->max; 180 + /* compare them in unit celsius degree */ 181 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) 182 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 183 + if (high > pptable_information->us_software_shutdown_temp) 184 + high = pptable_information->us_software_shutdown_temp; 184 185 185 186 if (low > high) 186 187 return -EINVAL; ··· 190 189 191 190 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 192 191 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 193 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 194 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 192 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); 193 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); 195 194 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 196 195 197 196 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+11 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
··· 240 240 static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 241 241 struct PP_TemperatureRange *range) 242 242 { 243 + struct phm_ppt_v3_information *pptable_information = 244 + (struct phm_ppt_v3_information *)hwmgr->pptable; 243 245 struct amdgpu_device *adev = hwmgr->adev; 244 - int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP * 245 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 246 - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * 247 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 246 + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP; 247 + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP; 248 248 uint32_t val; 249 249 250 - if (low < range->min) 251 - low = range->min; 252 - if (high > range->max) 253 - high = range->max; 250 + /* compare them in unit celsius degree */ 251 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) 252 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 253 + if (high > pptable_information->us_software_shutdown_temp) 254 + high = pptable_information->us_software_shutdown_temp; 254 255 255 256 if (low > high) 256 257 return -EINVAL; ··· 260 259 261 260 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 262 261 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 263 - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 264 - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); 262 + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); 263 + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); 265 264 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 266 265 267 266 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+9 -2
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
··· 95 95 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 96 96 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 97 97 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 98 + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), 98 99 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 99 100 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), 100 101 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), ··· 776 775 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 777 776 if (ret) 778 777 return ret; 779 - if (adev->asic_type == CHIP_SIENNA_CICHLID) { 778 + if (adev->vcn.num_vcn_inst > 1) { 780 779 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 781 780 0x10000, NULL); 782 781 if (ret) ··· 788 787 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); 789 788 if (ret) 790 789 return ret; 791 - if (adev->asic_type == CHIP_SIENNA_CICHLID) { 790 + if (adev->vcn.num_vcn_inst > 1) { 792 791 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 793 792 0x10000, NULL); 794 793 if (ret) ··· 1731 1730 amdgpu_gfx_off_ctrl(adev, true); 1732 1731 1733 1732 return ret; 1733 + } 1734 + 1735 + static int sienna_cichlid_run_btc(struct smu_context *smu) 1736 + { 1737 + return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); 1734 1738 } 1735 1739 1736 1740 static bool sienna_cichlid_is_baco_supported(struct smu_context *smu) ··· 2725 2719 .mode1_reset = smu_v11_0_mode1_reset, 2726 2720 .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, 2727 2721 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, 2722 + .run_btc = sienna_cichlid_run_btc, 2728 2723 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2729 2724 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 2730 2725 };
+4 -3
drivers/gpu/drm/drm_atomic_helper.c
··· 34 34 #include <drm/drm_bridge.h> 35 35 #include <drm/drm_damage_helper.h> 36 36 #include <drm/drm_device.h> 37 + #include <drm/drm_drv.h> 37 38 #include <drm/drm_plane_helper.h> 38 39 #include <drm/drm_print.h> 39 40 #include <drm/drm_self_refresh_helper.h> ··· 3107 3106 if (ret) 3108 3107 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 3109 3108 3110 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 3109 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); 3111 3110 } 3112 3111 EXPORT_SYMBOL(drm_atomic_helper_shutdown); 3113 3112 ··· 3247 3246 } 3248 3247 3249 3248 unlock: 3250 - DRM_MODESET_LOCK_ALL_END(ctx, err); 3249 + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); 3251 3250 if (err) 3252 3251 return ERR_PTR(err); 3253 3252 ··· 3328 3327 3329 3328 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 3330 3329 3331 - DRM_MODESET_LOCK_ALL_END(ctx, err); 3330 + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); 3332 3331 drm_atomic_state_put(state); 3333 3332 3334 3333 return err;
+1 -1
drivers/gpu/drm/drm_color_mgmt.c
··· 294 294 crtc->gamma_size, &ctx); 295 295 296 296 out: 297 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 297 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); 298 298 return ret; 299 299 300 300 }
+1 -3
drivers/gpu/drm/drm_crtc.c
··· 588 588 if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) 589 589 return -EACCES; 590 590 591 - mutex_lock(&crtc->dev->mode_config.mutex); 592 591 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 593 592 DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); 594 593 ··· 755 756 fb = NULL; 756 757 mode = NULL; 757 758 758 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 759 - mutex_unlock(&crtc->dev->mode_config.mutex); 759 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); 760 760 761 761 return ret; 762 762 }
+2 -2
drivers/gpu/drm/drm_dp_mst_topology.c
··· 5040 5040 5041 5041 crtc = conn_state->crtc; 5042 5042 5043 - if (WARN_ON(!crtc)) 5044 - return -EINVAL; 5043 + if (!crtc) 5044 + continue; 5045 5045 5046 5046 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) 5047 5047 continue;
+2 -2
drivers/gpu/drm/drm_mode_object.c
··· 428 428 out_unref: 429 429 drm_mode_object_put(obj); 430 430 out: 431 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 431 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); 432 432 return ret; 433 433 } 434 434 ··· 470 470 break; 471 471 } 472 472 drm_property_change_valid_put(prop, ref); 473 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 473 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); 474 474 475 475 return ret; 476 476 }
+1 -1
drivers/gpu/drm/drm_plane.c
··· 792 792 crtc_x, crtc_y, crtc_w, crtc_h, 793 793 src_x, src_y, src_w, src_h, &ctx); 794 794 795 - DRM_MODESET_LOCK_ALL_END(ctx, ret); 795 + DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret); 796 796 797 797 return ret; 798 798 }
+9 -2
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 337 337 338 338 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); 339 339 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); 340 - gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); 341 340 gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID); 342 - gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); 341 + 342 + /* 343 + * Reading these two registers on GC600 rev 0x19 result in a 344 + * unhandled fault: external abort on non-linefetch 345 + */ 346 + if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) { 347 + gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); 348 + gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); 349 + } 343 350 344 351 /* 345 352 * !!!! HACK ALERT !!!!
+6 -5
drivers/gpu/drm/etnaviv/etnaviv_sched.c
··· 89 89 u32 dma_addr; 90 90 int change; 91 91 92 + /* block scheduler */ 93 + drm_sched_stop(&gpu->sched, sched_job); 94 + 92 95 /* 93 96 * If the GPU managed to complete this jobs fence, the timout is 94 97 * spurious. Bail out. 95 98 */ 96 99 if (dma_fence_is_signaled(submit->out_fence)) 97 - return; 100 + goto out_no_timeout; 98 101 99 102 /* 100 103 * If the GPU is still making forward progress on the front-end (which ··· 108 105 change = dma_addr - gpu->hangcheck_dma_addr; 109 106 if (change < 0 || change > 16) { 110 107 gpu->hangcheck_dma_addr = dma_addr; 111 - return; 108 + goto out_no_timeout; 112 109 } 113 - 114 - /* block scheduler */ 115 - drm_sched_stop(&gpu->sched, sched_job); 116 110 117 111 if(sched_job) 118 112 drm_sched_increase_karma(sched_job); ··· 120 120 121 121 drm_sched_resubmit_jobs(&gpu->sched); 122 122 123 + out_no_timeout: 123 124 /* restart scheduler after GPU is usable again */ 124 125 drm_sched_start(&gpu->sched, true); 125 126 }
+1 -1
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 92 92 offset = fbi->var.xoffset * fb->format->cpp[0]; 93 93 offset += fbi->var.yoffset * fb->pitches[0]; 94 94 95 - fbi->screen_base = exynos_gem->kvaddr + offset; 95 + fbi->screen_buffer = exynos_gem->kvaddr + offset; 96 96 fbi->screen_size = size; 97 97 fbi->fix.smem_len = size; 98 98
+1 -1
drivers/gpu/drm/exynos/exynos_drm_gem.h
··· 40 40 unsigned int flags; 41 41 unsigned long size; 42 42 void *cookie; 43 - void __iomem *kvaddr; 43 + void *kvaddr; 44 44 dma_addr_t dma_addr; 45 45 unsigned long dma_attrs; 46 46 struct sg_table *sgt;
+10 -4
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 1204 1204 return dst; 1205 1205 } 1206 1206 1207 + static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc, 1208 + const u32 cmd) 1209 + { 1210 + return desc->cmd.value == (cmd & desc->cmd.mask); 1211 + } 1212 + 1207 1213 static bool check_cmd(const struct intel_engine_cs *engine, 1208 1214 const struct drm_i915_cmd_descriptor *desc, 1209 1215 const u32 *cmd, u32 length) ··· 1248 1242 * allowed mask/value pair given in the whitelist entry. 1249 1243 */ 1250 1244 if (reg->mask) { 1251 - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { 1245 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) { 1252 1246 DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n", 1253 1247 reg_addr); 1254 1248 return false; 1255 1249 } 1256 1250 1257 - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { 1251 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) { 1258 1252 DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n", 1259 1253 reg_addr); 1260 1254 return false; 1261 1255 } 1262 1256 1263 - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && 1257 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) && 1264 1258 (offset + 2 > length || 1265 1259 (cmd[offset + 1] & reg->mask) != reg->value)) { 1266 1260 DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n", ··· 1484 1478 break; 1485 1479 } 1486 1480 1487 - if (desc->cmd.value == MI_BATCH_BUFFER_START) { 1481 + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { 1488 1482 ret = check_bbstart(cmd, offset, length, batch_length, 1489 1483 batch_addr, shadow_addr, 1490 1484 jump_whitelist);
+29 -9
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 133 133 134 134 if (!gmu->legacy) { 135 135 a6xx_hfi_set_freq(gmu, perf_index); 136 - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); 136 + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); 137 137 pm_runtime_put(gmu->dev); 138 138 return; 139 139 } ··· 157 157 if (ret) 158 158 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); 159 159 160 - /* 161 - * Eventually we will want to scale the path vote with the frequency but 162 - * for now leave it at max so that the performance is nominal. 163 - */ 164 - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); 160 + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); 165 161 pm_runtime_put(gmu->dev); 166 162 } 167 163 ··· 200 204 { 201 205 int ret; 202 206 u32 val; 207 + u32 mask, reset_val; 208 + 209 + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); 210 + if (val <= 0x20010004) { 211 + mask = 0xffffffff; 212 + reset_val = 0xbabeface; 213 + } else { 214 + mask = 0x1ff; 215 + reset_val = 0x100; 216 + } 203 217 204 218 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); 205 219 ··· 221 215 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); 222 216 223 217 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, 224 - val == 0xbabeface, 100, 10000); 218 + (val & mask) == reset_val, 100, 10000); 225 219 226 220 if (ret) 227 221 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); ··· 851 845 if (IS_ERR_OR_NULL(gpu_opp)) 852 846 return; 853 847 848 + gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ 854 849 a6xx_gmu_set_freq(gpu, gpu_opp); 850 + dev_pm_opp_put(gpu_opp); 851 + } 852 + 853 + static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) 854 + { 855 + struct dev_pm_opp *gpu_opp; 856 + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; 857 + 858 + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); 859 + if (IS_ERR_OR_NULL(gpu_opp)) 860 + return; 861 + 862 + dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); 855 863 dev_pm_opp_put(gpu_opp); 
856 864 } 857 865 ··· 902 882 } 903 883 904 884 /* Set the bus quota to a reasonable value for boot */ 905 - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); 885 + a6xx_gmu_set_initial_bw(gpu, gmu); 906 886 907 887 /* Enable the GMU interrupt */ 908 888 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); ··· 1071 1051 a6xx_gmu_shutdown(gmu); 1072 1052 1073 1053 /* Remove the bus vote */ 1074 - icc_set_bw(gpu->icc_path, 0, 0); 1054 + dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); 1075 1055 1076 1056 /* 1077 1057 * Make sure the GX domain is off before turning off the GMU (CX)
+2 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
··· 938 938 msm_gem_kernel_put(dumper.bo, gpu->aspace, true); 939 939 } 940 940 941 - a6xx_get_debugbus(gpu, a6xx_state); 941 + if (snapshot_debugbus) 942 + a6xx_get_debugbus(gpu, a6xx_state); 942 943 943 944 return &a6xx_state->base; 944 945 }
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
··· 372 372 u32 data; 373 373 u32 count; 374 374 } a6xx_indexed_reglist[] = { 375 - { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR, 375 + { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, 376 376 REG_A6XX_CP_SQE_STAT_DATA, 0x33 }, 377 377 { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, 378 378 REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+4
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 14 14 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); 15 15 module_param_named(hang_debug, hang_debug, bool, 0600); 16 16 17 + bool snapshot_debugbus = false; 18 + MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); 19 + module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); 20 + 17 21 static const struct adreno_info gpulist[] = { 18 22 { 19 23 .rev = ADRENO_REV(2, 0, 0, 0),
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 396 396 ring->next = ring->start; 397 397 398 398 /* reset completed fence seqno: */ 399 - ring->memptrs->fence = ring->seqno; 399 + ring->memptrs->fence = ring->fctx->completed_fence; 400 400 ring->memptrs->rptr = 0; 401 401 } 402 402
+2
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 21 21 #define REG_SKIP ~0 22 22 #define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP 23 23 24 + extern bool snapshot_debugbus; 25 + 24 26 /** 25 27 * adreno_regs: List of registers that are used in across all 26 28 * 3D devices. Each device type has different offset value for the same
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 827 827 { 828 828 struct dpu_crtc *dpu_crtc; 829 829 struct drm_encoder *encoder; 830 - bool request_bandwidth; 830 + bool request_bandwidth = false; 831 831 832 832 if (!crtc) { 833 833 DPU_ERROR("invalid crtc\n");
+11 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 599 599 dpu_kms = to_dpu_kms(priv->kms); 600 600 mode = &crtc_state->mode; 601 601 adj_mode = &crtc_state->adjusted_mode; 602 - global_state = dpu_kms_get_existing_global_state(dpu_kms); 602 + global_state = dpu_kms_get_global_state(crtc_state->state); 603 + if (IS_ERR(global_state)) 604 + return PTR_ERR(global_state); 605 + 603 606 trace_dpu_enc_atomic_check(DRMID(drm_enc)); 604 607 605 608 /* perform atomic check on the first physical encoder (master) */ ··· 628 625 /* Reserve dynamic resources now. */ 629 626 if (!ret) { 630 627 /* 631 - * Avoid reserving resources when mode set is pending. Topology 632 - * info may not be available to complete reservation. 628 + * Release and Allocate resources on every modeset 629 + * Dont allocate when active is false. 633 630 */ 634 631 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 635 - ret = dpu_rm_reserve(&dpu_kms->rm, global_state, 636 - drm_enc, crtc_state, topology); 632 + dpu_rm_release(global_state, drm_enc); 633 + 634 + if (!crtc_state->active_changed || crtc_state->active) 635 + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, 636 + drm_enc, crtc_state, topology); 637 637 } 638 638 } 639 639 ··· 1187 1181 struct dpu_encoder_virt *dpu_enc = NULL; 1188 1182 struct msm_drm_private *priv; 1189 1183 struct dpu_kms *dpu_kms; 1190 - struct dpu_global_state *global_state; 1191 1184 int i = 0; 1192 1185 1193 1186 if (!drm_enc) { ··· 1205 1200 1206 1201 priv = drm_enc->dev->dev_private; 1207 1202 dpu_kms = to_dpu_kms(priv->kms); 1208 - global_state = dpu_kms_get_existing_global_state(dpu_kms); 1209 1203 1210 1204 trace_dpu_enc_disable(DRMID(drm_enc)); 1211 1205 ··· 1233 1229 } 1234 1230 1235 1231 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); 1236 - 1237 - dpu_rm_release(global_state, drm_enc); 1238 1232 1239 1233 mutex_unlock(&dpu_enc->enc_lock); 1240 1234 }
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 866 866 crtc_state = drm_atomic_get_new_crtc_state(state->state, 867 867 state->crtc); 868 868 869 - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); 869 + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); 870 870 ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, 871 - pdpu->pipe_sblk->maxupscale << 16, 871 + pdpu->pipe_sblk->maxdwnscale << 16, 872 872 true, true); 873 873 if (ret) { 874 874 DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+36
drivers/gpu/drm/msm/msm_atomic.c
··· 27 27 return msm_framebuffer_prepare(new_state->fb, kms->aspace); 28 28 } 29 29 30 + /* 31 + * Helpers to control vblanks while we flush.. basically just to ensure 32 + * that vblank accounting is switched on, so we get valid seqn/timestamp 33 + * on pageflip events (if requested) 34 + */ 35 + 36 + static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) 37 + { 38 + struct drm_crtc *crtc; 39 + 40 + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { 41 + if (!crtc->state->active) 42 + continue; 43 + drm_crtc_vblank_get(crtc); 44 + } 45 + } 46 + 47 + static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) 48 + { 49 + struct drm_crtc *crtc; 50 + 51 + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { 52 + if (!crtc->state->active) 53 + continue; 54 + drm_crtc_vblank_put(crtc); 55 + } 56 + } 57 + 30 58 static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) 31 59 { 32 60 unsigned crtc_mask = BIT(crtc_idx); ··· 72 44 73 45 kms->funcs->enable_commit(kms); 74 46 47 + vblank_get(kms, crtc_mask); 48 + 75 49 /* 76 50 * Flush hardware updates: 77 51 */ ··· 87 57 trace_msm_atomic_wait_flush_start(crtc_mask); 88 58 kms->funcs->wait_flush(kms, crtc_mask); 89 59 trace_msm_atomic_wait_flush_finish(crtc_mask); 60 + 61 + vblank_put(kms, crtc_mask); 90 62 91 63 mutex_lock(&kms->commit_lock); 92 64 kms->funcs->complete_commit(kms, crtc_mask); ··· 253 221 */ 254 222 kms->pending_crtc_mask &= ~crtc_mask; 255 223 224 + vblank_get(kms, crtc_mask); 225 + 256 226 /* 257 227 * Flush hardware updates: 258 228 */ ··· 268 234 trace_msm_atomic_wait_flush_start(crtc_mask); 269 235 kms->funcs->wait_flush(kms, crtc_mask); 270 236 trace_msm_atomic_wait_flush_finish(crtc_mask); 237 + 238 + vblank_put(kms, crtc_mask); 271 239 272 240 mutex_lock(&kms->commit_lock); 273 241 kms->funcs->complete_commit(kms, crtc_mask);
+8
drivers/gpu/drm/msm/msm_drv.c
··· 1320 1320 return 0; 1321 1321 } 1322 1322 1323 + static void msm_pdev_shutdown(struct platform_device *pdev) 1324 + { 1325 + struct drm_device *drm = platform_get_drvdata(pdev); 1326 + 1327 + drm_atomic_helper_shutdown(drm); 1328 + } 1329 + 1323 1330 static const struct of_device_id dt_match[] = { 1324 1331 { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, 1325 1332 { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, ··· 1339 1332 static struct platform_driver msm_platform_driver = { 1340 1333 .probe = msm_pdev_probe, 1341 1334 .remove = msm_pdev_remove, 1335 + .shutdown = msm_pdev_shutdown, 1342 1336 .driver = { 1343 1337 .name = "msm", 1344 1338 .of_match_table = dt_match,
+2 -1
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 27 27 ring->id = id; 28 28 29 29 ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, 30 - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); 30 + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo, 31 + &ring->iova); 31 32 32 33 if (IS_ERR(ring->start)) { 33 34 ret = PTR_ERR(ring->start);
+2 -1
drivers/gpu/drm/omapdrm/omap_crtc.c
··· 451 451 if (omap_state->manually_updated) 452 452 return; 453 453 454 - spin_lock_irq(&crtc->dev->event_lock); 455 454 drm_crtc_vblank_on(crtc); 455 + 456 456 ret = drm_crtc_vblank_get(crtc); 457 457 WARN_ON(ret != 0); 458 458 459 + spin_lock_irq(&crtc->dev->event_lock); 459 460 omap_crtc_arm_event(crtc); 460 461 spin_unlock_irq(&crtc->dev->event_lock); 461 462 }
+7 -2
include/drm/drm_modeset_lock.h
··· 164 164 * is 0, so no error checking is necessary 165 165 */ 166 166 #define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ 167 + if (!drm_drv_uses_atomic_modeset(dev)) \ 168 + mutex_lock(&dev->mode_config.mutex); \ 167 169 drm_modeset_acquire_init(&ctx, flags); \ 168 170 modeset_lock_retry: \ 169 171 ret = drm_modeset_lock_all_ctx(dev, &ctx); \ ··· 174 172 175 173 /** 176 174 * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks 175 + * @dev: drm device 177 176 * @ctx: local modeset acquire context, will be dereferenced 178 177 * @ret: local ret/err/etc variable to track error status 179 178 * ··· 191 188 * to that failure. In both of these cases the code between BEGIN/END will not 192 189 * be run, so the failure will reflect the inability to grab the locks. 193 190 */ 194 - #define DRM_MODESET_LOCK_ALL_END(ctx, ret) \ 191 + #define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ 195 192 modeset_lock_fail: \ 196 193 if (ret == -EDEADLK) { \ 197 194 ret = drm_modeset_backoff(&ctx); \ ··· 199 196 goto modeset_lock_retry; \ 200 197 } \ 201 198 drm_modeset_drop_locks(&ctx); \ 202 - drm_modeset_acquire_fini(&ctx); 199 + drm_modeset_acquire_fini(&ctx); \ 200 + if (!drm_drv_uses_atomic_modeset(dev)) \ 201 + mutex_unlock(&dev->mode_config.mutex); 203 202 204 203 #endif /* DRM_MODESET_LOCK_H_ */