Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: preserve RSMU UMC index mode state

Restore the previous RSMU UMC index mode state between UMC RAS error register accesses.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: John Clements <john.clements@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by John Clements and committed by Alex Deucher
eee2eaba 9c8c81fe

+41 -2
+41 -2
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
··· 54 54 {9, 25, 0, 16}, {15, 31, 6, 22} 55 55 }; 56 56 57 + static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev) 58 + { 59 + WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 60 + RSMU_UMC_INDEX_MODE_EN, 1); 61 + } 62 + 57 63 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) 58 64 { 59 65 WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 60 66 RSMU_UMC_INDEX_MODE_EN, 0); 67 + } 68 + 69 + static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev) 70 + { 71 + uint32_t rsmu_umc_index; 72 + 73 + rsmu_umc_index = RREG32_SOC15(RSMU, 0, 74 + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); 75 + 76 + return REG_GET_FIELD(rsmu_umc_index, 77 + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 78 + RSMU_UMC_INDEX_MODE_EN); 61 79 } 62 80 63 81 static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev, ··· 181 163 uint32_t ch_inst = 0; 182 164 uint32_t umc_reg_offset = 0; 183 165 166 + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); 167 + 168 + if (rsmu_umc_index_state) 169 + umc_v6_1_disable_umc_index_mode(adev); 170 + 184 171 LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 185 172 umc_reg_offset = get_umc_6_reg_offset(adev, 186 173 umc_inst, ··· 198 175 umc_reg_offset, 199 176 &(err_data->ue_count)); 200 177 } 178 + 179 + if (rsmu_umc_index_state) 180 + umc_v6_1_enable_umc_index_mode(adev); 201 181 } 202 182 203 183 static void umc_v6_1_query_error_address(struct amdgpu_device *adev, ··· 242 216 if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && 243 217 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || 244 218 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { 245 - err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4); 246 219 220 + err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4); 247 221 /* the lowest lsb bits should be ignored */ 248 222 lsb = REG_GET_FIELD(err_addr, 
MCA_UMC_UMC0_MCUMC_ADDRT0, LSB); 249 223 err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); ··· 283 257 uint32_t ch_inst = 0; 284 258 uint32_t umc_reg_offset = 0; 285 259 260 + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); 261 + 262 + if (rsmu_umc_index_state) 263 + umc_v6_1_disable_umc_index_mode(adev); 264 + 286 265 LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 287 266 umc_reg_offset = get_umc_6_reg_offset(adev, 288 267 umc_inst, ··· 300 269 umc_inst); 301 270 } 302 271 272 + if (rsmu_umc_index_state) 273 + umc_v6_1_enable_umc_index_mode(adev); 303 274 } 304 275 305 276 static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, ··· 348 315 uint32_t ch_inst = 0; 349 316 uint32_t umc_reg_offset = 0; 350 317 351 - umc_v6_1_disable_umc_index_mode(adev); 318 + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); 319 + 320 + if (rsmu_umc_index_state) 321 + umc_v6_1_disable_umc_index_mode(adev); 352 322 353 323 LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 354 324 umc_reg_offset = get_umc_6_reg_offset(adev, ··· 360 324 361 325 umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset); 362 326 } 327 + 328 + if (rsmu_umc_index_state) 329 + umc_v6_1_enable_umc_index_mode(adev); 363 330 } 364 331 365 332 const struct amdgpu_umc_funcs umc_v6_1_funcs = {